Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
6 : #define V8_X64_MACRO_ASSEMBLER_X64_H_
7 :
8 : #include "src/bailout-reason.h"
9 : #include "src/base/flags.h"
10 : #include "src/frames.h"
11 : #include "src/globals.h"
12 : #include "src/x64/assembler-x64.h"
13 : #include "src/x64/frames-x64.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 :
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_rax};
const Register kReturnRegister1 = {Register::kCode_rdx};
const Register kReturnRegister2 = {Register::kCode_r8};
const Register kJSFunctionRegister = {Register::kCode_rdi};
const Register kContextRegister = {Register::kCode_rsi};
const Register kAllocateSizeRegister = {Register::kCode_rdx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_rax};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_rdx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_rbx};
const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};

// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
const Register kScratchRegister = {10};      // r10.
const XMMRegister kScratchDoubleReg = {15};  // xmm15.
const Register kRootRegister = {13};         // r13 (callee save).
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
const int kRootRegisterBias = 128;

// Convenience for platform-independent signatures.
typedef Operand MemOperand;

// Whether a write barrier should also update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a write barrier should emit an inline smi check on the value.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};

// Bit flags constraining the behavior of the checked smi arithmetic
// helpers (see SmiAddConstant/SmiSubConstant below); combined via
// SmiOperationConstraints.
enum class SmiOperationConstraint {
  kPreserveSourceRegister = 1 << 0,
  kBailoutOnNoOverflow = 1 << 1,
  kBailoutOnOverflow = 1 << 2
};

// Whether the return address has already been pushed onto the stack.
enum class ReturnAddressState { kOnStack, kNotOnStack };

typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;

DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)

#ifdef DEBUG
// Returns whether any of the given registers alias each other.
// Unused arguments default to no_reg.
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
#endif

// Forward declaration.
class JumpTarget;
79 :
80 : struct SmiIndex {
81 : SmiIndex(Register index_register, ScaleFactor scale)
82 : : reg(index_register),
83 : scale(scale) {}
84 : Register reg;
85 : ScaleFactor scale;
86 : };
87 :
88 :
89 : // MacroAssembler implements a collection of frequently used macros.
90 13458129 : class MacroAssembler: public Assembler {
91 : public:
92 : MacroAssembler(Isolate* isolate, void* buffer, int size,
93 : CodeObjectRequired create_code_object);
94 :
95 : int jit_cookie() const { return jit_cookie_; }
96 :
97 : // Prevent the use of the RootArray during the lifetime of this
98 : // scope object.
99 : class NoRootArrayScope BASE_EMBEDDED {
100 : public:
101 : explicit NoRootArrayScope(MacroAssembler* assembler)
102 : : variable_(&assembler->root_array_available_),
103 92344 : old_value_(assembler->root_array_available_) {
104 92344 : assembler->root_array_available_ = false;
105 : }
106 : ~NoRootArrayScope() {
107 92344 : *variable_ = old_value_;
108 : }
109 : private:
110 : bool* variable_;
111 : bool old_value_;
112 : };
113 :
114 : Isolate* isolate() const { return isolate_; }
115 :
116 : // Operand pointing to an external reference.
117 : // May emit code to set up the scratch register. The operand is
118 : // only guaranteed to be correct as long as the scratch register
119 : // isn't changed.
120 : // If the operand is used more than once, use a scratch register
121 : // that is guaranteed not to be clobbered.
122 : Operand ExternalOperand(ExternalReference reference,
123 : Register scratch = kScratchRegister);
124 : // Loads and stores the value of an external reference.
125 : // Special case code for load and store to take advantage of
126 : // load_rax/store_rax if possible/necessary.
127 : // For other operations, just use:
128 : // Operand operand = ExternalOperand(extref);
129 : // operation(operand, ..);
130 : void Load(Register destination, ExternalReference source);
131 : void Store(ExternalReference destination, Register source);
132 : // Loads the address of the external reference into the destination
133 : // register.
134 : void LoadAddress(Register destination, ExternalReference source);
135 : // Returns the size of the code generated by LoadAddress.
136 : // Used by CallSize(ExternalReference) to find the size of a call.
137 : int LoadAddressSize(ExternalReference source);
138 : // Pushes the address of the external reference onto the stack.
139 : void PushAddress(ExternalReference source);
140 :
141 : // Operations on roots in the root-array.
142 : void LoadRoot(Register destination, Heap::RootListIndex index);
  // Store the root value at |index| into |destination| (a memory operand).
  // Clobbers kScratchRegister, which is used to stage the value.
  void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
    LoadRoot(kScratchRegister, index);
    movp(destination, kScratchRegister);
  }
147 : void StoreRoot(Register source, Heap::RootListIndex index);
148 : // Load a root value where the index (or part of it) is variable.
149 : // The variable_offset register is added to the fixed_offset value
150 : // to get the index into the root-array.
151 : void LoadRootIndexed(Register destination,
152 : Register variable_offset,
153 : int fixed_offset);
154 : void CompareRoot(Register with, Heap::RootListIndex index);
155 : void CompareRoot(const Operand& with, Heap::RootListIndex index);
156 : void PushRoot(Heap::RootListIndex index);
157 :
  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
                  Label::Distance if_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }
  // As above, but the value to compare is read from memory.
  void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
                  Label* if_equal,
                  Label::Distance if_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal,
                     Label::Distance if_not_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
  // As above, but the value to compare is read from memory.
  void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
                     Label* if_not_equal,
                     Label::Distance if_not_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
184 :
185 : // These functions do not arrange the registers in any particular order so
186 : // they are not useful for calls that can cause a GC. The caller can
187 : // exclude up to 3 registers that do not need to be saved and restored.
188 : void PushCallerSaved(SaveFPRegsMode fp_mode,
189 : Register exclusion1 = no_reg,
190 : Register exclusion2 = no_reg,
191 : Register exclusion3 = no_reg);
192 : void PopCallerSaved(SaveFPRegsMode fp_mode,
193 : Register exclusion1 = no_reg,
194 : Register exclusion2 = no_reg,
195 : Register exclusion3 = no_reg);
196 :
197 : // ---------------------------------------------------------------------------
198 : // GC Support
199 :
200 :
201 : enum RememberedSetFinalAction {
202 : kReturnAtEnd,
203 : kFallThroughAtEnd
204 : };
205 :
206 : // Record in the remembered set the fact that we have a pointer to new space
207 : // at the address pointed to by the addr register. Only works if addr is not
208 : // in new space.
209 : void RememberedSetHelper(Register object, // Used for debug code.
210 : Register addr,
211 : Register scratch,
212 : SaveFPRegsMode save_fp,
213 : RememberedSetFinalAction and_then);
214 :
215 : void CheckPageFlag(Register object,
216 : Register scratch,
217 : int mask,
218 : Condition cc,
219 : Label* condition_met,
220 : Label::Distance condition_met_distance = Label::kFar);
221 :
  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  // Delegates to InNewSpace; only the jump condition differs from
  // JumpIfInNewSpace below.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, zero, branch, distance);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_zero, branch, distance);
  }
239 :
240 : // Check if an object has the black incremental marking color. Also uses rcx!
241 : void JumpIfBlack(Register object, Register bitmap_scratch,
242 : Register mask_scratch, Label* on_black,
243 : Label::Distance on_black_distance);
244 :
245 : // Checks the color of an object. If the object is white we jump to the
246 : // incremental marker.
247 : void JumpIfWhite(Register value, Register scratch1, Register scratch2,
248 : Label* value_is_white, Label::Distance distance);
249 :
250 : // Notify the garbage collector that we wrote a pointer into an object.
251 : // |object| is the object being stored into, |value| is the object being
252 : // stored. value and scratch registers are clobbered by the operation.
253 : // The offset is the offset from the start of the object, not the offset from
254 : // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
255 : void RecordWriteField(
256 : Register object,
257 : int offset,
258 : Register value,
259 : Register scratch,
260 : SaveFPRegsMode save_fp,
261 : RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
262 : SmiCheck smi_check = INLINE_SMI_CHECK,
263 : PointersToHereCheck pointers_to_here_check_for_value =
264 : kPointersToHereMaybeInteresting);
265 :
266 : // As above, but the offset has the tag presubtracted. For use with
267 : // Operand(reg, off).
268 : void RecordWriteContextSlot(
269 : Register context,
270 : int offset,
271 : Register value,
272 : Register scratch,
273 : SaveFPRegsMode save_fp,
274 : RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
275 : SmiCheck smi_check = INLINE_SMI_CHECK,
276 : PointersToHereCheck pointers_to_here_check_for_value =
277 : kPointersToHereMaybeInteresting) {
278 : RecordWriteField(context,
279 : offset + kHeapObjectTag,
280 : value,
281 : scratch,
282 : save_fp,
283 : remembered_set_action,
284 : smi_check,
285 701346 : pointers_to_here_check_for_value);
286 : }
287 :
288 : // Notify the garbage collector that we wrote a code entry into a
289 : // JSFunction. Only scratch is clobbered by the operation.
290 : void RecordWriteCodeEntryField(Register js_function, Register code_entry,
291 : Register scratch);
292 :
293 : void RecordWriteForMap(
294 : Register object,
295 : Register map,
296 : Register dst,
297 : SaveFPRegsMode save_fp);
298 :
299 : // For page containing |object| mark region covering |address|
300 : // dirty. |object| is the object being stored into, |value| is the
301 : // object being stored. The address and value registers are clobbered by the
302 : // operation. RecordWrite filters out smis so it does not update
303 : // the write barrier if the value is a smi.
304 : void RecordWrite(
305 : Register object,
306 : Register address,
307 : Register value,
308 : SaveFPRegsMode save_fp,
309 : RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
310 : SmiCheck smi_check = INLINE_SMI_CHECK,
311 : PointersToHereCheck pointers_to_here_check_for_value =
312 : kPointersToHereMaybeInteresting);
313 :
314 : // Frame restart support.
315 : void MaybeDropFrames();
316 :
317 : // Generates function and stub prologue code.
318 : void StubPrologue(StackFrame::Type type);
319 : void Prologue(bool code_pre_aging);
320 :
321 : // Enter specific kind of exit frame; either in normal or
322 : // debug mode. Expects the number of arguments in register rax and
323 : // sets up the number of arguments in register rdi and the pointer
324 : // to the first argument in register rsi.
325 : //
326 : // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
327 : // accessible via StackSpaceOperand.
328 : void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false,
329 : StackFrame::Type frame_type = StackFrame::EXIT);
330 :
331 : // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
332 : // memory (not GCed) on the stack accessible via StackSpaceOperand.
333 : void EnterApiExitFrame(int arg_stack_space);
334 :
335 : // Leave the current exit frame. Expects/provides the return value in
336 : // register rax:rdx (untouched) and the pointer to the first
337 : // argument in register rsi (if pop_arguments == true).
338 : void LeaveExitFrame(bool save_doubles = false, bool pop_arguments = true);
339 :
340 : // Leave the current exit frame. Expects/provides the return value in
341 : // register rax (untouched).
342 : void LeaveApiExitFrame(bool restore_context);
343 :
  // Push and pop the registers that can hold pointers.
  // Both are thin wrappers over Pushad()/Popad().
  void PushSafepointRegisters() { Pushad(); }
  void PopSafepointRegisters() { Popad(); }
347 : // Store the value in register src in the safepoint register stack
348 : // slot for register dst.
349 : void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
350 : void StoreToSafepointRegisterSlot(Register dst, Register src);
351 : void LoadFromSafepointRegisterSlot(Register dst, Register src);
352 :
353 309295 : void InitializeRootRegister() {
354 : ExternalReference roots_array_start =
355 309295 : ExternalReference::roots_array_start(isolate());
356 : Move(kRootRegister, roots_array_start);
357 309295 : addp(kRootRegister, Immediate(kRootRegisterBias));
358 309295 : }
359 :
360 : // ---------------------------------------------------------------------------
361 : // JavaScript invokes
362 :
363 : // Removes current frame and its arguments from the stack preserving
364 : // the arguments and a return address pushed to the stack for the next call.
365 : // |ra_state| defines whether return address is already pushed to stack or
366 : // not. Both |callee_args_count| and |caller_args_count_reg| do not include
367 : // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
368 : // is trashed.
369 : void PrepareForTailCall(const ParameterCount& callee_args_count,
370 : Register caller_args_count_reg, Register scratch0,
371 : Register scratch1, ReturnAddressState ra_state);
372 :
373 : // Invoke the JavaScript function code by either calling or jumping.
374 : void InvokeFunctionCode(Register function, Register new_target,
375 : const ParameterCount& expected,
376 : const ParameterCount& actual, InvokeFlag flag,
377 : const CallWrapper& call_wrapper);
378 :
379 : // On function call, call into the debugger if necessary.
380 : void CheckDebugHook(Register fun, Register new_target,
381 : const ParameterCount& expected,
382 : const ParameterCount& actual);
383 :
384 : // Invoke the JavaScript function in the given register. Changes the
385 : // current context to the context in the function before invoking.
386 : void InvokeFunction(Register function,
387 : Register new_target,
388 : const ParameterCount& actual,
389 : InvokeFlag flag,
390 : const CallWrapper& call_wrapper);
391 :
392 : void InvokeFunction(Register function,
393 : Register new_target,
394 : const ParameterCount& expected,
395 : const ParameterCount& actual,
396 : InvokeFlag flag,
397 : const CallWrapper& call_wrapper);
398 :
399 : void InvokeFunction(Handle<JSFunction> function,
400 : const ParameterCount& expected,
401 : const ParameterCount& actual,
402 : InvokeFlag flag,
403 : const CallWrapper& call_wrapper);
404 :
405 : // ---------------------------------------------------------------------------
406 : // Smi tagging, untagging and operations on tagged smis.
407 :
408 : // Support for constant splitting.
409 : bool IsUnsafeInt(const int32_t x);
410 : void SafeMove(Register dst, Smi* src);
411 : void SafePush(Smi* src);
412 :
413 : // Conversions between tagged smi values and non-tagged integer values.
414 :
415 : // Tag an integer value. The result must be known to be a valid smi value.
416 : // Only uses the low 32 bits of the src register. Sets the N and Z flags
417 : // based on the value of the resulting smi.
418 : void Integer32ToSmi(Register dst, Register src);
419 :
420 : // Stores an integer32 value into a memory field that already holds a smi.
421 : void Integer32ToSmiField(const Operand& dst, Register src);
422 :
423 : // Adds constant to src and tags the result as a smi.
424 : // Result must be a valid smi.
425 : void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
426 :
427 : // Convert smi to 32-bit integer. I.e., not sign extended into
428 : // high 32 bits of destination.
429 : void SmiToInteger32(Register dst, Register src);
430 : void SmiToInteger32(Register dst, const Operand& src);
431 :
432 : // Convert smi to 64-bit integer (sign extended if necessary).
433 : void SmiToInteger64(Register dst, Register src);
434 : void SmiToInteger64(Register dst, const Operand& src);
435 :
  // Convert smi to double.
  // Clobbers kScratchRegister, which holds the untagged integer value.
  void SmiToDouble(XMMRegister dst, Register src) {
    SmiToInteger32(kScratchRegister, src);
    Cvtlsi2sd(dst, kScratchRegister);
  }
441 :
442 : // Multiply a positive smi's integer value by a power of two.
443 : // Provides result as 64-bit integer value.
444 : void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
445 : Register src,
446 : int power);
447 :
448 : // Simple comparison of smis. Both sides must be known smis to use these,
449 : // otherwise use Cmp.
450 : void SmiCompare(Register smi1, Register smi2);
451 : void SmiCompare(Register dst, Smi* src);
452 : void SmiCompare(Register dst, const Operand& src);
453 : void SmiCompare(const Operand& dst, Register src);
454 : void SmiCompare(const Operand& dst, Smi* src);
455 : // Compare the int32 in src register to the value of the smi stored at dst.
456 : void SmiTest(Register src);
457 :
458 : // Functions performing a check on a known or potential smi. Returns
459 : // a condition that is satisfied if the check is successful.
460 :
461 : // Is the value a tagged smi.
462 : Condition CheckSmi(Register src);
463 : Condition CheckSmi(const Operand& src);
464 :
465 : // Is the value a non-negative tagged smi.
466 : Condition CheckNonNegativeSmi(Register src);
467 :
468 : // Are both values tagged smis.
469 : Condition CheckBothSmi(Register first, Register second);
470 :
471 : // Are both values non-negative tagged smis.
472 : Condition CheckBothNonNegativeSmi(Register first, Register second);
473 :
474 : // Are either value a tagged smi.
475 : Condition CheckEitherSmi(Register first,
476 : Register second,
477 : Register scratch = kScratchRegister);
478 :
// Checks whether a 32-bit integer value is valid for conversion
// to a smi.
481 : Condition CheckInteger32ValidSmiValue(Register src);
482 :
// Checks whether a 32-bit unsigned integer value is valid for
// conversion to a smi.
485 : Condition CheckUInteger32ValidSmiValue(Register src);
486 :
487 : // Test-and-jump functions. Typically combines a check function
488 : // above with a conditional jump.
489 :
490 : // Jump if the value can be represented by a smi.
491 : void JumpIfValidSmiValue(Register src, Label* on_valid,
492 : Label::Distance near_jump = Label::kFar);
493 :
494 : // Jump if the value cannot be represented by a smi.
495 : void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
496 : Label::Distance near_jump = Label::kFar);
497 :
498 : // Jump if the unsigned integer value can be represented by a smi.
499 : void JumpIfUIntValidSmiValue(Register src, Label* on_valid,
500 : Label::Distance near_jump = Label::kFar);
501 :
502 : // Jump if the unsigned integer value cannot be represented by a smi.
503 : void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
504 : Label::Distance near_jump = Label::kFar);
505 :
506 : // Jump to label if the value is a tagged smi.
507 : void JumpIfSmi(Register src,
508 : Label* on_smi,
509 : Label::Distance near_jump = Label::kFar);
510 :
511 : // Jump to label if the value is not a tagged smi.
512 : void JumpIfNotSmi(Register src,
513 : Label* on_not_smi,
514 : Label::Distance near_jump = Label::kFar);
515 :
516 : // Jump to label if the value is not a tagged smi.
517 : void JumpIfNotSmi(Operand src, Label* on_not_smi,
518 : Label::Distance near_jump = Label::kFar);
519 :
520 : // Jump to label if the value is not a non-negative tagged smi.
521 : void JumpUnlessNonNegativeSmi(Register src,
522 : Label* on_not_smi,
523 : Label::Distance near_jump = Label::kFar);
524 :
525 : // Jump to label if the value, which must be a tagged smi, has value equal
526 : // to the constant.
527 : void JumpIfSmiEqualsConstant(Register src,
528 : Smi* constant,
529 : Label* on_equals,
530 : Label::Distance near_jump = Label::kFar);
531 :
532 : // Jump if either or both register are not smi values.
533 : void JumpIfNotBothSmi(Register src1,
534 : Register src2,
535 : Label* on_not_both_smi,
536 : Label::Distance near_jump = Label::kFar);
537 :
538 : // Jump if either or both register are not non-negative smi values.
539 : void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
540 : Label* on_not_both_smi,
541 : Label::Distance near_jump = Label::kFar);
542 :
543 : // Operations on tagged smi values.
544 :
545 : // Smis represent a subset of integers. The subset is always equivalent to
546 : // a two's complement interpretation of a fixed number of bits.
547 :
548 : // Add an integer constant to a tagged smi, giving a tagged smi as result.
549 : // No overflow testing on the result is done.
550 : void SmiAddConstant(Register dst, Register src, Smi* constant);
551 :
552 : // Add an integer constant to a tagged smi, giving a tagged smi as result.
553 : // No overflow testing on the result is done.
554 : void SmiAddConstant(const Operand& dst, Smi* constant);
555 :
556 : // Add an integer constant to a tagged smi, giving a tagged smi as result,
557 : // or jumping to a label if the result cannot be represented by a smi.
558 : void SmiAddConstant(Register dst, Register src, Smi* constant,
559 : SmiOperationConstraints constraints, Label* bailout_label,
560 : Label::Distance near_jump = Label::kFar);
561 :
562 : // Subtract an integer constant from a tagged smi, giving a tagged smi as
563 : // result. No testing on the result is done. Sets the N and Z flags
564 : // based on the value of the resulting integer.
565 : void SmiSubConstant(Register dst, Register src, Smi* constant);
566 :
567 : // Subtract an integer constant from a tagged smi, giving a tagged smi as
568 : // result, or jumping to a label if the result cannot be represented by a smi.
569 : void SmiSubConstant(Register dst, Register src, Smi* constant,
570 : SmiOperationConstraints constraints, Label* bailout_label,
571 : Label::Distance near_jump = Label::kFar);
572 :
573 : // Negating a smi can give a negative zero or too large positive value.
574 : // NOTICE: This operation jumps on success, not failure!
575 : void SmiNeg(Register dst,
576 : Register src,
577 : Label* on_smi_result,
578 : Label::Distance near_jump = Label::kFar);
579 :
580 : // Adds smi values and return the result as a smi.
581 : // If dst is src1, then src1 will be destroyed if the operation is
582 : // successful, otherwise kept intact.
583 : void SmiAdd(Register dst,
584 : Register src1,
585 : Register src2,
586 : Label* on_not_smi_result,
587 : Label::Distance near_jump = Label::kFar);
588 : void SmiAdd(Register dst,
589 : Register src1,
590 : const Operand& src2,
591 : Label* on_not_smi_result,
592 : Label::Distance near_jump = Label::kFar);
593 :
594 : void SmiAdd(Register dst,
595 : Register src1,
596 : Register src2);
597 :
598 : // Subtracts smi values and return the result as a smi.
599 : // If dst is src1, then src1 will be destroyed if the operation is
600 : // successful, otherwise kept intact.
601 : void SmiSub(Register dst,
602 : Register src1,
603 : Register src2,
604 : Label* on_not_smi_result,
605 : Label::Distance near_jump = Label::kFar);
606 : void SmiSub(Register dst,
607 : Register src1,
608 : const Operand& src2,
609 : Label* on_not_smi_result,
610 : Label::Distance near_jump = Label::kFar);
611 :
612 : void SmiSub(Register dst,
613 : Register src1,
614 : Register src2);
615 :
616 : void SmiSub(Register dst,
617 : Register src1,
618 : const Operand& src2);
619 :
620 : // Multiplies smi values and return the result as a smi,
621 : // if possible.
622 : // If dst is src1, then src1 will be destroyed, even if
623 : // the operation is unsuccessful.
624 : void SmiMul(Register dst,
625 : Register src1,
626 : Register src2,
627 : Label* on_not_smi_result,
628 : Label::Distance near_jump = Label::kFar);
629 :
630 : // Divides one smi by another and returns the quotient.
631 : // Clobbers rax and rdx registers.
632 : void SmiDiv(Register dst,
633 : Register src1,
634 : Register src2,
635 : Label* on_not_smi_result,
636 : Label::Distance near_jump = Label::kFar);
637 :
638 : // Divides one smi by another and returns the remainder.
639 : // Clobbers rax and rdx registers.
640 : void SmiMod(Register dst,
641 : Register src1,
642 : Register src2,
643 : Label* on_not_smi_result,
644 : Label::Distance near_jump = Label::kFar);
645 :
646 : // Bitwise operations.
647 : void SmiNot(Register dst, Register src);
648 : void SmiAnd(Register dst, Register src1, Register src2);
649 : void SmiOr(Register dst, Register src1, Register src2);
650 : void SmiXor(Register dst, Register src1, Register src2);
651 : void SmiAndConstant(Register dst, Register src1, Smi* constant);
652 : void SmiOrConstant(Register dst, Register src1, Smi* constant);
653 : void SmiXorConstant(Register dst, Register src1, Smi* constant);
654 :
655 : void SmiShiftLeftConstant(Register dst,
656 : Register src,
657 : int shift_value,
658 : Label* on_not_smi_result = NULL,
659 : Label::Distance near_jump = Label::kFar);
660 : void SmiShiftLogicalRightConstant(Register dst,
661 : Register src,
662 : int shift_value,
663 : Label* on_not_smi_result,
664 : Label::Distance near_jump = Label::kFar);
665 : void SmiShiftArithmeticRightConstant(Register dst,
666 : Register src,
667 : int shift_value);
668 :
669 : // Shifts a smi value to the left, and returns the result if that is a smi.
670 : // Uses and clobbers rcx, so dst may not be rcx.
671 : void SmiShiftLeft(Register dst,
672 : Register src1,
673 : Register src2,
674 : Label* on_not_smi_result = NULL,
675 : Label::Distance near_jump = Label::kFar);
676 : // Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
678 : // Uses and clobbers rcx, so dst may not be rcx.
679 : void SmiShiftLogicalRight(Register dst,
680 : Register src1,
681 : Register src2,
682 : Label* on_not_smi_result,
683 : Label::Distance near_jump = Label::kFar);
684 : // Shifts a smi value to the right, sign extending the top, and
// returns the signed interpretation of the result. That will always
686 : // be a valid smi value, since it's numerically smaller than the
687 : // original.
688 : // Uses and clobbers rcx, so dst may not be rcx.
689 : void SmiShiftArithmeticRight(Register dst,
690 : Register src1,
691 : Register src2);
692 :
693 : // Specialized operations
694 :
695 : // Select the non-smi register of two registers where exactly one is a
696 : // smi. If neither are smis, jump to the failure label.
697 : void SelectNonSmi(Register dst,
698 : Register src1,
699 : Register src2,
700 : Label* on_not_smis,
701 : Label::Distance near_jump = Label::kFar);
702 :
703 : // Converts, if necessary, a smi to a combination of number and
704 : // multiplier to be used as a scaled index.
705 : // The src register contains a *positive* smi value. The shift is the
706 : // power of two to multiply the index value by (e.g.
707 : // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
708 : // The returned index register may be either src or dst, depending
709 : // on what is most efficient. If src and dst are different registers,
710 : // src is always unchanged.
711 : SmiIndex SmiToIndex(Register dst, Register src, int shift);
712 :
713 : // Converts a positive smi to a negative index.
714 : SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
715 :
716 : // Add the value of a smi in memory to an int32 register.
717 : // Sets flags as a normal add.
718 : void AddSmiField(Register dst, const Operand& src);
719 :
  // Basic Smi operations.

  // Load a smi constant into a register.
  void Move(Register dst, Smi* source) {
    LoadSmiConstant(dst, source);
  }

  // Store a smi constant to memory.
  // NOTE(review): GetSmiConstant materializes the constant in a register —
  // presumably a scratch register; confirm before relying on its contents.
  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movp(dst, constant);
  }
729 :
730 : void Push(Smi* smi);
731 :
732 : // Save away a raw integer with pointer size on the stack as two integers
733 : // masquerading as smis so that the garbage collector skips visiting them.
734 : void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
735 : // Reconstruct a raw integer with pointer size from two integers masquerading
736 : // as smis on the top of stack.
737 : void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister);
738 :
739 : void Test(const Operand& dst, Smi* source);
740 :
741 :
742 : // ---------------------------------------------------------------------------
743 : // String macros.
744 :
745 : // If object is a string, its map is loaded into object_map.
746 : void JumpIfNotString(Register object,
747 : Register object_map,
748 : Label* not_string,
749 : Label::Distance near_jump = Label::kFar);
750 :
751 :
752 : void JumpIfNotBothSequentialOneByteStrings(
753 : Register first_object, Register second_object, Register scratch1,
754 : Register scratch2, Label* on_not_both_flat_one_byte,
755 : Label::Distance near_jump = Label::kFar);
756 :
757 : void JumpIfBothInstanceTypesAreNotSequentialOneByte(
758 : Register first_object_instance_type, Register second_object_instance_type,
759 : Register scratch1, Register scratch2, Label* on_fail,
760 : Label::Distance near_jump = Label::kFar);
761 :
762 : void EmitSeqStringSetCharCheck(Register string,
763 : Register index,
764 : Register value,
765 : uint32_t encoding_mask);
766 :
767 : // Checks if the given register or operand is a unique name
768 : void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
769 : Label::Distance distance = Label::kFar);
770 : void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
771 : Label::Distance distance = Label::kFar);
772 :
773 : // ---------------------------------------------------------------------------
774 : // Macro instructions.
775 :
776 : // Load/store with specific representation.
777 : void Load(Register dst, const Operand& src, Representation r);
778 : void Store(const Operand& dst, Register src, Representation r);
779 :
780 : // Load a register with a long value as efficiently as possible.
781 : void Set(Register dst, int64_t x);
782 : void Set(const Operand& dst, intptr_t x);
783 :
784 : void Cvtss2sd(XMMRegister dst, XMMRegister src);
785 : void Cvtss2sd(XMMRegister dst, const Operand& src);
786 : void Cvtsd2ss(XMMRegister dst, XMMRegister src);
787 : void Cvtsd2ss(XMMRegister dst, const Operand& src);
788 :
789 : // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
790 : // hinders register renaming and makes dependence chains longer. So we use
791 : // xorpd to clear the dst register before cvtsi2sd to solve this issue.
792 : void Cvtlsi2sd(XMMRegister dst, Register src);
793 : void Cvtlsi2sd(XMMRegister dst, const Operand& src);
794 :
795 : void Cvtlsi2ss(XMMRegister dst, Register src);
796 : void Cvtlsi2ss(XMMRegister dst, const Operand& src);
797 : void Cvtqsi2ss(XMMRegister dst, Register src);
798 : void Cvtqsi2ss(XMMRegister dst, const Operand& src);
799 :
800 : void Cvtqsi2sd(XMMRegister dst, Register src);
801 : void Cvtqsi2sd(XMMRegister dst, const Operand& src);
802 :
803 : void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
804 : void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
805 :
806 : void Cvtsd2si(Register dst, XMMRegister src);
807 :
808 : void Cvttss2si(Register dst, XMMRegister src);
809 : void Cvttss2si(Register dst, const Operand& src);
810 : void Cvttsd2si(Register dst, XMMRegister src);
811 : void Cvttsd2si(Register dst, const Operand& src);
812 : void Cvttss2siq(Register dst, XMMRegister src);
813 : void Cvttss2siq(Register dst, const Operand& src);
814 : void Cvttsd2siq(Register dst, XMMRegister src);
815 : void Cvttsd2siq(Register dst, const Operand& src);
816 :
817 : // Move if the registers are not identical.
818 : void Move(Register target, Register source);
819 :
820 : void LoadSharedFunctionInfoSpecialField(Register dst,
821 : Register base,
822 : int offset);
823 :
824 : // Handle support
825 : void Move(Register dst, Handle<Object> source);
826 : void Move(const Operand& dst, Handle<Object> source);
827 : void Cmp(Register dst, Handle<Object> source);
828 : void Cmp(const Operand& dst, Handle<Object> source);
829 : void Cmp(Register dst, Smi* src);
830 : void Cmp(const Operand& dst, Smi* src);
831 : void Push(Handle<Object> source);
832 :
833 : // Load a heap object and handle the case of new-space objects by
834 : // indirecting via a global cell.
835 : void MoveHeapObject(Register result, Handle<Object> object);
836 :
837 : void GetWeakValue(Register value, Handle<WeakCell> cell);
838 :
839 : // Load the value of the weak cell in the value register. Branch to the given
840 : // miss label if the weak cell was cleared.
841 : void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
842 :
// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_ra_offset_in_words| specifies the number of words pushed
// below the caller's sp (on x64 it's at least return address).
// NOTE: not yet implemented on x64 — hitting this at runtime aborts.
template <class Descriptor>
void LoadParameterFromStack(
    Register reg, typename Descriptor::ParameterIndices parameter_index,
    int sp_to_ra_offset_in_words = 1) {
  DCHECK(Descriptor::kPassLastArgsOnStack);
  UNIMPLEMENTED();
}
854 :
855 : // Emit code to discard a non-negative number of pointer-sized elements
856 : // from the stack, clobbering only the rsp register.
857 : void Drop(int stack_elements);
858 : // Emit code to discard a positive number of pointer-sized elements
859 : // from the stack under the return address which remains on the top,
860 : // clobbering the rsp register.
861 : void DropUnderReturnAddress(int stack_elements,
862 : Register scratch = kScratchRegister);
863 :
// Emit a call to code at a (possibly unbound) label.
void Call(Label* target) { call(target); }
// Push/pop pointer-sized values. The *Quad variants always operate on a
// full 64-bit stack slot; presumably they differ from Push/Pop only when
// pointers are narrower than 8 bytes — confirm in the .cc.
void Push(Register src);
void Push(const Operand& src);
void PushQuad(const Operand& src);
void Push(Immediate value);
void PushImm32(int32_t imm32);
void Pop(Register dst);
void Pop(const Operand& dst);
void PopQuad(const Operand& dst);
// Move the return address between a register and the stack top; used when
// code needs the return address out of the way temporarily.
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
// Load the raw address of an external (C++) reference into |dst|,
// recording EXTERNAL_REFERENCE relocation info for the stored pointer.
void Move(Register dst, ExternalReference ext) {
  movp(dst, reinterpret_cast<void*>(ext.address()),
       RelocInfo::EXTERNAL_REFERENCE);
}
879 :
// Loads a pointer into a register with a relocation mode.
void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
  // This method must not be used with heap object references. The stored
  // address is not GC safe. Use the handle version instead.
  // (The DCHECK enforces that |rmode| is outside the GC-visited range.)
  DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
  movp(dst, ptr, rmode);
}
887 :
// Load the heap object referenced by |value| into |dst|, embedding the
// handle's current location with relocation mode |rmode|. |rmode| must not
// be NONE and the handle must hold a heap object (both DCHECKed below).
void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
  // NOTE(review): dereferencing the handle location here relies on |rmode|
  // keeping the embedded pointer visible to the GC — confirm in RelocInfo.
  AllowDeferredHandleDereference using_raw_address;
  DCHECK(!RelocInfo::IsNone(rmode));
  DCHECK(value->IsHeapObject());
  movp(dst, reinterpret_cast<void*>(value.location()), rmode);
}
894 :
// Load raw bit patterns into XMM registers. The float/double overloads
// reuse the integer paths via bit_cast, so no FP conversion is involved.
void Move(XMMRegister dst, uint32_t src);
void Move(XMMRegister dst, uint64_t src);
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
899 :
// Helper macros generating wrappers that emit the VEX-encoded AVX form of a
// binary SSE instruction when AVX is available (v<name> dst, dst, src) and
// fall back to the legacy two-operand SSE encoding otherwise. The
// destination register doubles as the first source operand.
#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
  void macro_name(XMMRegister dst, src_type src) {    \
    if (CpuFeatures::IsSupported(AVX)) {              \
      CpuFeatureScope scope(this, AVX);               \
      v##name(dst, dst, src);                         \
    } else {                                          \
      name(dst, src);                                 \
    }                                                 \
  }
// Instantiation helpers: X = XMMRegister source, O = memory-operand source.
#define AVX_OP2_X(macro_name, name) \
  AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
#define AVX_OP2_O(macro_name, name) \
  AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
#define AVX_OP2_XO(macro_name, name) \
  AVX_OP2_X(macro_name, name) \
  AVX_OP2_O(macro_name, name)
916 :
// SSE/AVX wrappers generated from the macros above; each emits either the
// legacy SSE instruction or its three-operand AVX form.
AVX_OP2_XO(Addsd, addsd)
AVX_OP2_XO(Subsd, subsd)
AVX_OP2_XO(Mulsd, mulsd)
AVX_OP2_XO(Divss, divss)
AVX_OP2_XO(Divsd, divsd)
AVX_OP2_XO(Andps, andps)
AVX_OP2_XO(Andpd, andpd)
AVX_OP2_XO(Orpd, orpd)
AVX_OP2_XO(Xorpd, xorpd)
AVX_OP2_XO(Cmpeqps, cmpeqps)
AVX_OP2_XO(Cmpltps, cmpltps)
AVX_OP2_XO(Cmpleps, cmpleps)
AVX_OP2_XO(Cmpneqps, cmpneqps)
AVX_OP2_XO(Cmpnltps, cmpnltps)
AVX_OP2_XO(Cmpnleps, cmpnleps)
AVX_OP2_XO(Cmpeqpd, cmpeqpd)
AVX_OP2_XO(Cmpltpd, cmpltpd)
AVX_OP2_XO(Cmplepd, cmplepd)
AVX_OP2_XO(Cmpneqpd, cmpneqpd)
AVX_OP2_XO(Cmpnltpd, cmpnltpd)
AVX_OP2_XO(Cmpnlepd, cmpnlepd)
AVX_OP2_X(Pcmpeqd, pcmpeqd)
AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)

#undef AVX_OP2_O
#undef AVX_OP2_X
#undef AVX_OP2_XO
#undef AVX_OP2_WITH_TYPE
946 :
947 : void Movsd(XMMRegister dst, XMMRegister src);
948 : void Movsd(XMMRegister dst, const Operand& src);
949 : void Movsd(const Operand& dst, XMMRegister src);
950 : void Movss(XMMRegister dst, XMMRegister src);
951 : void Movss(XMMRegister dst, const Operand& src);
952 : void Movss(const Operand& dst, XMMRegister src);
953 :
954 : void Movd(XMMRegister dst, Register src);
955 : void Movd(XMMRegister dst, const Operand& src);
956 : void Movd(Register dst, XMMRegister src);
957 : void Movq(XMMRegister dst, Register src);
958 : void Movq(Register dst, XMMRegister src);
959 :
960 : void Movaps(XMMRegister dst, XMMRegister src);
961 : void Movups(XMMRegister dst, XMMRegister src);
962 : void Movups(XMMRegister dst, const Operand& src);
963 : void Movups(const Operand& dst, XMMRegister src);
964 : void Movmskps(Register dst, XMMRegister src);
965 : void Movapd(XMMRegister dst, XMMRegister src);
966 : void Movupd(XMMRegister dst, const Operand& src);
967 : void Movupd(const Operand& dst, XMMRegister src);
968 : void Movmskpd(Register dst, XMMRegister src);
969 :
970 : void Xorps(XMMRegister dst, XMMRegister src);
971 : void Xorps(XMMRegister dst, const Operand& src);
972 :
973 : void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
974 : void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
975 : void Sqrtsd(XMMRegister dst, XMMRegister src);
976 : void Sqrtsd(XMMRegister dst, const Operand& src);
977 :
978 : void Ucomiss(XMMRegister src1, XMMRegister src2);
979 : void Ucomiss(XMMRegister src1, const Operand& src2);
980 : void Ucomisd(XMMRegister src1, XMMRegister src2);
981 : void Ucomisd(XMMRegister src1, const Operand& src2);
982 :
983 : // ---------------------------------------------------------------------------
984 : // SIMD macros.
985 : void Absps(XMMRegister dst);
986 : void Negps(XMMRegister dst);
987 : void Abspd(XMMRegister dst);
988 : void Negpd(XMMRegister dst);
989 :
990 : // Control Flow
991 : void Jump(Address destination, RelocInfo::Mode rmode);
992 : void Jump(ExternalReference ext);
993 : void Jump(const Operand& op);
994 : void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
995 :
996 : void Call(Address destination, RelocInfo::Mode rmode);
997 : void Call(ExternalReference ext);
998 : void Call(const Operand& op);
999 : void Call(Handle<Code> code_object,
1000 : RelocInfo::Mode rmode,
1001 : TypeFeedbackId ast_id = TypeFeedbackId::None());
1002 :
// The size of the code generated for different call instructions.
// Calls to an arbitrary Address use the full call sequence.
int CallSize(Address destination) {
  return kCallSequenceLength;
}
int CallSize(ExternalReference ext);
int CallSize(Handle<Code> code_object) {
  // Code calls use 32-bit relative addressing.
  return kShortCallInstructionLength;
}
int CallSize(Register target) {
  // Opcode: REX_opt FF /2 m64
  // Registers with the high bit set (r8..r15) need a REX prefix byte.
  return (target.high_bit() != 0) ? 3 : 2;
}
int CallSize(const Operand& target) {
  // Opcode: REX_opt FF /2 m64
  // One opcode byte, an optional REX prefix, plus the operand encoding.
  return (target.requires_rex() ? 2 : 1) + target.operand_size();
}
1020 :
1021 : // Non-SSE2 instructions.
1022 : void Pextrd(Register dst, XMMRegister src, int8_t imm8);
1023 : void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
1024 : void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
1025 :
1026 : void Lzcntq(Register dst, Register src);
1027 : void Lzcntq(Register dst, const Operand& src);
1028 :
1029 : void Lzcntl(Register dst, Register src);
1030 : void Lzcntl(Register dst, const Operand& src);
1031 :
1032 : void Tzcntq(Register dst, Register src);
1033 : void Tzcntq(Register dst, const Operand& src);
1034 :
1035 : void Tzcntl(Register dst, Register src);
1036 : void Tzcntl(Register dst, const Operand& src);
1037 :
1038 : void Popcntl(Register dst, Register src);
1039 : void Popcntl(Register dst, const Operand& src);
1040 :
1041 : void Popcntq(Register dst, Register src);
1042 : void Popcntq(Register dst, const Operand& src);
1043 :
1044 : // Non-x64 instructions.
1045 : // Push/pop all general purpose registers.
1046 : // Does not push rsp/rbp nor any of the assembler's special purpose registers
1047 : // (kScratchRegister, kRootRegister).
1048 : void Pushad();
1049 : void Popad();
1050 :
1051 : // Compare object type for heap object.
1052 : // Always use unsigned comparisons: above and below, not less and greater.
1053 : // Incoming register is heap_object and outgoing register is map.
1054 : // They may be the same register, and may be kScratchRegister.
1055 : void CmpObjectType(Register heap_object, InstanceType type, Register map);
1056 :
1057 : // Compare instance type for map.
1058 : // Always use unsigned comparisons: above and below, not less and greater.
1059 : void CmpInstanceType(Register map, InstanceType type);
1060 :
1061 : // Compare an object's map with the specified map.
1062 : void CompareMap(Register obj, Handle<Map> map);
1063 :
1064 : // Check if the map of an object is equal to a specified map and branch to
1065 : // label if not. Skip the smi check if not required (object is known to be a
1066 : // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1067 : // against maps that are ElementsKind transition maps of the specified map.
1068 : void CheckMap(Register obj,
1069 : Handle<Map> map,
1070 : Label* fail,
1071 : SmiCheckType smi_check_type);
1072 :
1073 : // Check if the object in register heap_object is a string. Afterwards the
1074 : // register map contains the object map and the register instance_type
1075 : // contains the instance_type. The registers map and instance_type can be the
1076 : // same in which case it contains the instance type afterwards. Either of the
1077 : // registers map and instance_type can be the same as heap_object.
1078 : Condition IsObjectStringType(Register heap_object,
1079 : Register map,
1080 : Register instance_type);
1081 :
1082 : // FCmp compares and pops the two values on top of the FPU stack.
1083 : // The flag results are similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, and jz).
1085 : void FCmp();
1086 :
1087 : void ClampUint8(Register reg);
1088 :
1089 : void ClampDoubleToUint8(XMMRegister input_reg,
1090 : XMMRegister temp_xmm_reg,
1091 : Register result_reg);
1092 :
1093 : void SlowTruncateToI(Register result_reg, Register input_reg,
1094 : int offset = HeapNumber::kValueOffset - kHeapObjectTag);
1095 :
1096 : void TruncateHeapNumberToI(Register result_reg, Register input_reg);
1097 : void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
1098 :
1099 : void DoubleToI(Register result_reg, XMMRegister input_reg,
1100 : XMMRegister scratch, MinusZeroMode minus_zero_mode,
1101 : Label* lost_precision, Label* is_nan, Label* minus_zero,
1102 : Label::Distance dst = Label::kFar);
1103 :
1104 : void LoadUint32(XMMRegister dst, Register src);
1105 :
1106 : void LoadInstanceDescriptors(Register map, Register descriptors);
1107 : void EnumLength(Register dst, Register map);
1108 : void NumberOfOwnDescriptors(Register dst, Register map);
1109 : void LoadAccessor(Register dst, Register holder, int accessor_index,
1110 : AccessorComponent accessor);
1111 :
// Extract the raw (untagged) value of bit field |Field| from |reg| in
// place: shift the field down to bit 0, then mask away everything else.
template<typename Field>
void DecodeField(Register reg) {
  static const int shift = Field::kShift;
  static const int mask = Field::kMask >> Field::kShift;
  if (shift != 0) {  // No shift needed for fields starting at bit 0.
    shrp(reg, Immediate(shift));
  }
  andp(reg, Immediate(mask));
}
1121 :
// Extract bit field |Field| from |reg| and leave the result as a Smi,
// all in place. The two branches handle the 32-bit and 31-bit Smi layouts.
template<typename Field>
void DecodeFieldToSmi(Register reg) {
  if (SmiValuesAre32Bits()) {
    // 32-bit Smi payloads: mask first, then shift the field straight into
    // Smi position (kSmiShift >= Field::kShift is implied here).
    andp(reg, Immediate(Field::kMask));
    shlp(reg, Immediate(kSmiShift - Field::kShift));
  } else {
    static const int shift = Field::kShift;
    // Mask pre-shifted into the final, Smi-tagged position.
    static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
    DCHECK(SmiValuesAre31Bits());
    DCHECK(kSmiShift == kSmiTagSize);
    DCHECK((mask & 0x80000000u) == 0);  // Mask must not reach the sign bit.
    // Align the field with the Smi shift, then mask away neighbors.
    if (shift < kSmiShift) {
      shlp(reg, Immediate(kSmiShift - shift));
    } else if (shift > kSmiShift) {
      sarp(reg, Immediate(shift - kSmiShift));
    }
    andp(reg, Immediate(mask));
  }
}
1141 :
1142 : // Abort execution if argument is a smi, enabled via --debug-code.
1143 : void AssertNotSmi(Register object);
1144 :
1145 : // Abort execution if argument is not a smi, enabled via --debug-code.
1146 : void AssertSmi(Register object);
1147 : void AssertSmi(const Operand& object);
1148 :
1149 : // Abort execution if a 64 bit register containing a 32 bit payload does not
1150 : // have zeros in the top 32 bits, enabled via --debug-code.
1151 : void AssertZeroExtended(Register reg);
1152 :
1153 : // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1154 : void AssertFunction(Register object);
1155 :
1156 : // Abort execution if argument is not a JSBoundFunction,
1157 : // enabled via --debug-code.
1158 : void AssertBoundFunction(Register object);
1159 :
1160 : // Abort execution if argument is not a JSGeneratorObject,
1161 : // enabled via --debug-code.
1162 : void AssertGeneratorObject(Register object, Register suspend_flags);
1163 :
1164 : // Abort execution if argument is not undefined or an AllocationSite, enabled
1165 : // via --debug-code.
1166 : void AssertUndefinedOrAllocationSite(Register object);
1167 :
1168 : // ---------------------------------------------------------------------------
1169 : // Exception handling
1170 :
1171 : // Push a new stack handler and link it into stack handler chain.
1172 : void PushStackHandler();
1173 :
1174 : // Unlink the stack handler on top of the stack from the stack handler chain.
1175 : void PopStackHandler();
1176 :
1177 : // ---------------------------------------------------------------------------
1178 : // Inline caching support
1179 :
1180 : void GetNumberHash(Register r0, Register scratch);
1181 :
1182 : // ---------------------------------------------------------------------------
1183 : // Allocation support
1184 :
1185 : // Allocate an object in new space or old space. If the given space
1186 : // is exhausted control continues at the gc_required label. The allocated
1187 : // object is returned in result and end of the new object is returned in
1188 : // result_end. The register scratch can be passed as no_reg in which case
1189 : // an additional object reference will be added to the reloc info. The
1190 : // returned pointers in result and result_end have not yet been tagged as
1191 : // heap objects. If result_contains_top_on_entry is true the content of
1192 : // result is known to be the allocation top on entry (could be result_end
1193 : // from a previous call). If result_contains_top_on_entry is true scratch
1194 : // should be no_reg as it is never used.
1195 : void Allocate(int object_size,
1196 : Register result,
1197 : Register result_end,
1198 : Register scratch,
1199 : Label* gc_required,
1200 : AllocationFlags flags);
1201 :
1202 : void Allocate(int header_size,
1203 : ScaleFactor element_size,
1204 : Register element_count,
1205 : Register result,
1206 : Register result_end,
1207 : Register scratch,
1208 : Label* gc_required,
1209 : AllocationFlags flags);
1210 :
1211 : void Allocate(Register object_size,
1212 : Register result,
1213 : Register result_end,
1214 : Register scratch,
1215 : Label* gc_required,
1216 : AllocationFlags flags);
1217 :
1218 : // FastAllocate is right now only used for folded allocations. It just
1219 : // increments the top pointer without checking against limit. This can only
1220 : // be done if it was proved earlier that the allocation will succeed.
1221 : void FastAllocate(int object_size, Register result, Register result_end,
1222 : AllocationFlags flags);
1223 :
1224 : void FastAllocate(Register object_size, Register result, Register result_end,
1225 : AllocationFlags flags);
1226 :
1227 : // Allocate a heap number in new space with undefined value. Returns
1228 : // tagged pointer in result register, or jumps to gc_required if new
1229 : // space is full.
1230 : void AllocateHeapNumber(Register result,
1231 : Register scratch,
1232 : Label* gc_required,
1233 : MutableMode mode = IMMUTABLE);
1234 :
1235 : // Allocate and initialize a JSValue wrapper with the specified {constructor}
1236 : // and {value}.
1237 : void AllocateJSValue(Register result, Register constructor, Register value,
1238 : Register scratch, Label* gc_required);
1239 :
1240 : // ---------------------------------------------------------------------------
1241 : // Support functions.
1242 :
1243 : // Machine code version of Map::GetConstructor().
1244 : // |temp| holds |result|'s map when done.
1245 : void GetMapConstructor(Register result, Register map, Register temp);
1246 :
1247 : // Find the function context up the context chain.
1248 : void LoadContext(Register dst, int context_chain_length);
1249 :
// Load the global object from the current context. The global object lives
// in the native context's EXTENSION slot.
void LoadGlobalObject(Register dst) {
  LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
}

// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
  LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
1259 :
1260 : // Load the native context slot with the current index.
1261 : void LoadNativeContextSlot(int index, Register dst);
1262 :
1263 : // Load the initial map from the global function. The registers
1264 : // function and map can be the same.
1265 : void LoadGlobalFunctionInitialMap(Register function, Register map);
1266 :
1267 : // ---------------------------------------------------------------------------
1268 : // Runtime calls
1269 :
1270 : // Call a code stub.
1271 : void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
1272 :
1273 : // Tail call a code stub (jump).
1274 : void TailCallStub(CodeStub* stub);
1275 :
1276 : // Call a runtime routine.
1277 : void CallRuntime(const Runtime::Function* f,
1278 : int num_arguments,
1279 : SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1280 :
// Call a runtime function and save the value of XMM registers. The
// argument count is taken from the runtime function table (nargs).
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  CallRuntime(function, function->nargs, kSaveFPRegs);
}

// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
                 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  CallRuntime(function, function->nargs, save_doubles);
}

// Convenience function: Same as above, but with an explicit argument count
// that overrides the function table's nargs.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
1299 :
1300 : // Convenience function: call an external reference.
1301 : void CallExternalReference(const ExternalReference& ext,
1302 : int num_arguments);
1303 :
1304 : // Convenience function: tail call a runtime routine (jump)
1305 : void TailCallRuntime(Runtime::FunctionId fid);
1306 :
1307 : // Jump to a runtime routines
1308 : void JumpToExternalReference(const ExternalReference& ext,
1309 : bool builtin_exit_frame = false);
1310 :
1311 : // Before calling a C-function from generated code, align arguments on stack.
1312 : // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
1313 : // etc., not pushed. The argument count assumes all arguments are word sized.
1314 : // The number of slots reserved for arguments depends on platform. On Windows
1315 : // stack slots are reserved for the arguments passed in registers. On other
1316 : // platforms stack slots are only reserved for the arguments actually passed
1317 : // on the stack.
1318 : void PrepareCallCFunction(int num_arguments);
1319 :
1320 : // Calls a C function and cleans up the space for arguments allocated
1321 : // by PrepareCallCFunction. The called function is not allowed to trigger a
1322 : // garbage collection, since that might move the code and invalidate the
1323 : // return address (unless this is somehow accounted for by the called
1324 : // function).
1325 : void CallCFunction(ExternalReference function, int num_arguments);
1326 : void CallCFunction(Register function, int num_arguments);
1327 :
1328 : // Calculate the number of stack slots to reserve for arguments when calling a
1329 : // C function.
1330 : int ArgumentStackSlotsForCFunctionCall(int num_arguments);
1331 :
1332 : // ---------------------------------------------------------------------------
1333 : // Utilities
1334 :
1335 : void Ret();
1336 :
1337 : // Return and drop arguments from stack, where the number of arguments
1338 : // may be bigger than 2^16 - 1. Requires a scratch register.
1339 : void Ret(int bytes_dropped, Register scratch);
1340 :
// Returns the handle that is patched with the code object on installation
// (see code_object_). Must not be called before the handle is initialized.
Handle<Object> CodeObject() {
  DCHECK(!code_object_.is_null());
  return code_object_;
}
1345 :
1346 : // Initialize fields with filler values. Fields starting at |current_address|
1347 : // not including |end_address| are overwritten with the value in |filler|. At
1348 : // the end the loop, |current_address| takes the value of |end_address|.
1349 : void InitializeFieldsWithFiller(Register current_address,
1350 : Register end_address, Register filler);
1351 :
1352 :
1353 : // Emit code for a truncating division by a constant. The dividend register is
1354 : // unchanged, the result is in rdx, and rax gets clobbered.
1355 : void TruncatingDiv(Register dividend, int32_t divisor);
1356 :
1357 : // ---------------------------------------------------------------------------
1358 : // StatsCounter support
1359 :
1360 : void SetCounter(StatsCounter* counter, int value);
1361 : void IncrementCounter(StatsCounter* counter, int value);
1362 : void DecrementCounter(StatsCounter* counter, int value);
1363 :
1364 :
1365 : // ---------------------------------------------------------------------------
1366 : // Debugging
1367 :
1368 : // Calls Abort(msg) if the condition cc is not satisfied.
1369 : // Use --debug_code to enable.
1370 : void Assert(Condition cc, BailoutReason reason);
1371 :
1372 : // Like Assert(), but always enabled.
1373 : void Check(Condition cc, BailoutReason reason);
1374 :
1375 : // Print a message to stdout and abort execution.
1376 : void Abort(BailoutReason msg);
1377 :
1378 : // Check that the stack is aligned.
1379 : void CheckStackAlignment();
1380 :
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
// Tracks whether a stack frame has been entered.
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);

// Map a register to its safepoint stack slot index; delegates to the
// private code-based overload.
static int SafepointRegisterStackIndex(Register reg) {
  return SafepointRegisterStackIndex(reg.code());
}
1391 :
1392 : // Load the type feedback vector from a JavaScript frame.
1393 : void EmitLoadFeedbackVector(Register vector);
1394 :
1395 : // Activation support.
1396 : void EnterFrame(StackFrame::Type type);
1397 : void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
1398 : void LeaveFrame(StackFrame::Type type);
1399 :
1400 : void EnterBuiltinFrame(Register context, Register target, Register argc);
1401 : void LeaveBuiltinFrame(Register context, Register target, Register argc);
1402 :
1403 : // Expects object in rax and returns map with validated enum cache
1404 : // in rax. Assumes that any other register can be used as a scratch.
1405 : void CheckEnumCache(Label* call_runtime);
1406 :
1407 : // AllocationMemento support. Arrays may have an associated
1408 : // AllocationMemento object that can be checked for in order to pretransition
1409 : // to another type.
1410 : // On entry, receiver_reg should point to the array object.
1411 : // scratch_reg gets clobbered.
1412 : // If allocation info is present, condition flags are set to equal.
1413 : void TestJSArrayForAllocationMemento(Register receiver_reg,
1414 : Register scratch_reg,
1415 : Label* no_memento_found);
1416 :
1417 : private:
1418 : // Order general registers are pushed by Pushad.
1419 : // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
1420 : static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
1421 : static const int kNumSafepointSavedRegisters = 12;
1422 : static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1423 :
1424 : bool generating_stub_;
1425 : bool has_frame_;
1426 : Isolate* isolate_;
1427 : bool root_array_available_;
1428 : int jit_cookie_;
1429 :
1430 : // Returns a register holding the smi value. The register MUST NOT be
1431 : // modified. It may be the "smi 1 constant" register.
1432 : Register GetSmiConstant(Smi* value);
1433 :
1434 : int64_t RootRegisterDelta(ExternalReference other);
1435 :
1436 : // Moves the smi value to the destination register.
1437 : void LoadSmiConstant(Register dst, Smi* value);
1438 :
1439 : // This handle will be patched with the code object on installation.
1440 : Handle<Object> code_object_;
1441 :
1442 : // Helper functions for generating invokes.
1443 : void InvokePrologue(const ParameterCount& expected,
1444 : const ParameterCount& actual,
1445 : Label* done,
1446 : bool* definitely_mismatches,
1447 : InvokeFlag flag,
1448 : Label::Distance near_jump,
1449 : const CallWrapper& call_wrapper);
1450 :
1451 : void EnterExitFramePrologue(bool save_rax, StackFrame::Type frame_type);
1452 :
1453 : // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
1454 : // accessible via StackSpaceOperand.
1455 : void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
1456 :
1457 : void LeaveExitFrameEpilogue(bool restore_context);
1458 :
1459 : // Allocation support helpers.
1460 : // Loads the top of new-space into the result register.
1461 : // Otherwise the address of the new-space top is loaded into scratch (if
1462 : // scratch is valid), and the new-space top is loaded into result.
1463 : void LoadAllocationTopHelper(Register result,
1464 : Register scratch,
1465 : AllocationFlags flags);
1466 :
1467 : void MakeSureDoubleAlignedHelper(Register result,
1468 : Register scratch,
1469 : Label* gc_required,
1470 : AllocationFlags flags);
1471 :
1472 : // Update allocation top with value in result_end register.
1473 : // If scratch is valid, it contains the address of the allocation top.
1474 : void UpdateAllocationTopHelper(Register result_end,
1475 : Register scratch,
1476 : AllocationFlags flags);
1477 :
1478 : // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1479 : void InNewSpace(Register object,
1480 : Register scratch,
1481 : Condition cc,
1482 : Label* branch,
1483 : Label::Distance distance = Label::kFar);
1484 :
1485 : // Helper for finding the mark bits for an address. Afterwards, the
1486 : // bitmap register points at the word with the mark bits and the mask
1487 : // the position of the first bit. Uses rcx as scratch and leaves addr_reg
1488 : // unchanged.
1489 : inline void GetMarkBits(Register addr_reg,
1490 : Register bitmap_reg,
1491 : Register mask_reg);
1492 :
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
// Translate a register code into its slot index within the block of
// kNumSafepointRegisters saved registers, counting from the opposite end
// of the push order recorded in kSafepointPushRegisterIndices.
static int SafepointRegisterStackIndex(int reg_code) {
  return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
}
1498 :
1499 : // Needs access to SafepointRegisterStackIndex for compiled frame
1500 : // traversal.
1501 : friend class StandardFrame;
1502 : };
1503 :
1504 :
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. Is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion.
class CodePatcher {
 public:
  // Prepares |size| bytes at |address| for patching; code is emitted
  // through masm(). NOTE(review): the constraints above are presumably
  // verified on destruction — confirm in the .cc.
  CodePatcher(Isolate* isolate, byte* address, int size);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
1523 :
1524 :
1525 : // -----------------------------------------------------------------------------
1526 : // Static helper functions.
1527 :
1528 : // Generate an Operand for loading a field from an object.
1529 : inline Operand FieldOperand(Register object, int offset) {
1530 12510966 : return Operand(object, offset - kHeapObjectTag);
1531 : }
1532 :
1533 :
1534 : // Generate an Operand for loading an indexed field from an object.
1535 : inline Operand FieldOperand(Register object,
1536 : Register index,
1537 : ScaleFactor scale,
1538 : int offset) {
1539 114617 : return Operand(object, index, scale, offset - kHeapObjectTag);
1540 : }
1541 :
1542 :
1543 2950646 : inline Operand ContextOperand(Register context, int index) {
1544 2950646 : return Operand(context, Context::SlotOffset(index));
1545 : }
1546 :
1547 :
1548 : inline Operand ContextOperand(Register context, Register index) {
1549 : return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
1550 : }
1551 :
1552 :
1553 : inline Operand NativeContextOperand() {
1554 91231 : return ContextOperand(rsi, Context::NATIVE_CONTEXT_INDEX);
1555 : }
1556 :
1557 :
1558 : // Provides access to exit frame stack space (not GCed).
1559 : inline Operand StackSpaceOperand(int index) {
1560 : #ifdef _WIN64
1561 : const int kShaddowSpace = 4;
1562 : return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
1563 : #else
1564 30894 : return Operand(rsp, index * kPointerSize);
1565 : #endif
1566 : }
1567 :
1568 :
1569 : inline Operand StackOperandForReturnAddress(int32_t disp) {
1570 86 : return Operand(rsp, disp);
1571 : }
1572 :
1573 : #define ACCESS_MASM(masm) masm->
1574 :
1575 : } // namespace internal
1576 : } // namespace v8
1577 :
1578 : #endif // V8_X64_MACRO_ASSEMBLER_X64_H_
|