Line data Source code
1 : // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 : // All Rights Reserved.
3 : //
4 : // Redistribution and use in source and binary forms, with or without
5 : // modification, are permitted provided that the following conditions are
6 : // met:
7 : //
8 : // - Redistributions of source code must retain the above copyright notice,
9 : // this list of conditions and the following disclaimer.
10 : //
11 : // - Redistribution in binary form must reproduce the above copyright
12 : // notice, this list of conditions and the following disclaimer in the
13 : // documentation and/or other materials provided with the distribution.
14 : //
15 : // - Neither the name of Sun Microsystems or the names of contributors may
16 : // be used to endorse or promote products derived from this software without
17 : // specific prior written permission.
18 : //
19 : // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 : // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 : // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 : // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 : // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 : // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 : // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 : // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 : // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 : // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 : // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 :
31 : // The original source code covered by the above license has been
32 : // modified significantly by Google Inc.
33 : // Copyright 2012 the V8 project authors. All rights reserved.
34 :
35 : // A lightweight X64 Assembler.
36 :
37 : #ifndef V8_X64_ASSEMBLER_X64_H_
38 : #define V8_X64_ASSEMBLER_X64_H_
39 :
40 : #include <deque>
41 : #include <map>
42 : #include <vector>
43 :
44 : #include "src/assembler.h"
45 : #include "src/label.h"
46 : #include "src/objects/smi.h"
47 : #include "src/x64/constants-x64.h"
48 : #include "src/x64/register-x64.h"
49 : #include "src/x64/sse-instr.h"
50 :
51 : namespace v8 {
52 : namespace internal {
53 :
54 : // Utility functions
55 :
56 : enum Condition {
57 : // any value < 0 is considered no_condition
58 : no_condition = -1,
59 :
60 : overflow = 0,
61 : no_overflow = 1,
62 : below = 2,
63 : above_equal = 3,
64 : equal = 4,
65 : not_equal = 5,
66 : below_equal = 6,
67 : above = 7,
68 : negative = 8,
69 : positive = 9,
70 : parity_even = 10,
71 : parity_odd = 11,
72 : less = 12,
73 : greater_equal = 13,
74 : less_equal = 14,
75 : greater = 15,
76 :
77 : // Fake conditions that are handled by the
78 : // opcodes using them.
79 : always = 16,
80 : never = 17,
81 : // aliases
82 : carry = below,
83 : not_carry = above_equal,
84 : zero = equal,
85 : not_zero = not_equal,
86 : sign = negative,
87 : not_sign = positive,
88 : last_condition = greater
89 : };
90 :
91 :
92 : // Returns the equivalent of !cc.
93 : // Negation of the default no_condition (-1) results in a non-default
94 : // no_condition value (-2). As long as tests for no_condition check
95 : // for condition < 0, this will work as expected.
96 : inline Condition NegateCondition(Condition cc) {
97 456123 : return static_cast<Condition>(cc ^ 1);
98 : }
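// Added note (not part of the original header): the XOR-with-1 trick works
// because x64 condition codes come in complementary pairs that differ only in
// their least significant bit. For example:
//
//   NegateCondition(zero)  == not_zero;     // 4 ^ 1 == 5
//   NegateCondition(below) == above_equal;  // 2 ^ 1 == 3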
99 :
100 :
101 : enum RoundingMode {
102 : kRoundToNearest = 0x0,
103 : kRoundDown = 0x1,
104 : kRoundUp = 0x2,
105 : kRoundToZero = 0x3
106 : };
107 :
108 :
109 : // -----------------------------------------------------------------------------
110 : // Machine instruction Immediates
111 :
112 : class Immediate {
113 : public:
114 10679066 : explicit constexpr Immediate(int32_t value) : value_(value) {}
115 : explicit constexpr Immediate(int32_t value, RelocInfo::Mode rmode)
116 : : value_(value), rmode_(rmode) {}
117 : explicit Immediate(Smi value)
118 : : value_(static_cast<int32_t>(static_cast<intptr_t>(value.ptr()))) {
119 : DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
120 : }
121 :
122 : private:
123 : const int32_t value_;
124 : const RelocInfo::Mode rmode_ = RelocInfo::NONE;
125 :
126 : friend class Assembler;
127 : };
128 : ASSERT_TRIVIALLY_COPYABLE(Immediate);
129 : static_assert(sizeof(Immediate) <= kSystemPointerSize,
130 : "Immediate must be small enough to pass it by value");
131 :
132 : // -----------------------------------------------------------------------------
133 : // Machine instruction Operands
134 :
135 : enum ScaleFactor : int8_t {
136 : times_1 = 0,
137 : times_2 = 1,
138 : times_4 = 2,
139 : times_8 = 3,
140 : times_int_size = times_4,
141 : times_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
142 : times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4,
143 : };
144 :
145 : class V8_EXPORT_PRIVATE Operand {
146 : public:
147 52834592 : struct Data {
148 : byte rex = 0;
149 : byte buf[9];
150 : byte len = 1; // number of bytes of buf in use.
151 : int8_t addend; // for rip + offset + addend.
152 : };
153 :
154 : // [base + disp/r]
155 : Operand(Register base, int32_t disp);
156 :
157 : // [base + index*scale + disp/r]
158 : Operand(Register base,
159 : Register index,
160 : ScaleFactor scale,
161 : int32_t disp);
162 :
163 : // [index*scale + disp/r]
164 : Operand(Register index,
165 : ScaleFactor scale,
166 : int32_t disp);
167 :
168 : // Offset from existing memory operand.
169 : // The offset is added to the existing displacement as a 32-bit signed value;
170 : // the addition must not overflow.
171 : Operand(Operand base, int32_t offset);
172 :
173 : // [rip + disp/r]
174 : explicit Operand(Label* label, int addend = 0);
175 :
176 : Operand(const Operand&) V8_NOEXCEPT = default;
177 :
178 : // Checks whether either base or index register is the given register.
179 : // Does not check the "reg" part of the Operand.
180 : bool AddressUsesRegister(Register reg) const;
181 :
182 : // Queries related to the size of the generated instruction.
183 : // Whether the generated instruction will have a REX prefix.
184 : bool requires_rex() const { return data_.rex != 0; }
185 : // Size of the ModR/M, SIB and displacement parts of the generated
186 : // instruction.
187 : int operand_size() const { return data_.len; }
188 :
189 : const Data& data() const { return data_; }
190 :
191 : private:
192 : const Data data_;
193 : };
194 : ASSERT_TRIVIALLY_COPYABLE(Operand);
195 : static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
196 : "Operand must be small enough to pass it by value");
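// Illustrative uses of the Operand constructors above (a sketch assuming the
// registers declared in src/x64/register-x64.h; not part of the original
// header):
//
//   Operand(rbp, -8);                 // [rbp - 8]
//   Operand(rbx, rcx, times_8, 16);   // [rbx + rcx*8 + 16]
//   Operand(rcx, times_4, 0);         // [rcx*4]
//   Operand(&done);                   // [rip + offset to label 'done']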
197 :
198 : #define ASSEMBLER_INSTRUCTION_LIST(V) \
199 : V(add) \
200 : V(and) \
201 : V(cmp) \
202 : V(cmpxchg) \
203 : V(dec) \
204 : V(idiv) \
205 : V(div) \
206 : V(imul) \
207 : V(inc) \
208 : V(lea) \
209 : V(mov) \
210 : V(movzxb) \
211 : V(movzxw) \
212 : V(neg) \
213 : V(not) \
214 : V(or) \
215 : V(repmovs) \
216 : V(sbb) \
217 : V(sub) \
218 : V(test) \
219 : V(xchg) \
220 : V(xor)
221 :
222 : // Shift instructions on operands/registers with kSystemPointerSize, kInt32Size
223 : // and kInt64Size.
224 : #define SHIFT_INSTRUCTION_LIST(V) \
225 : V(rol, 0x0) \
226 : V(ror, 0x1) \
227 : V(rcl, 0x2) \
228 : V(rcr, 0x3) \
229 : V(shl, 0x4) \
230 : V(shr, 0x5) \
231 : V(sar, 0x7)
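// Added note: the second argument of each entry is the /digit opcode
// extension placed in the ModR/M reg field of the x64 shift-group encodings
// (e.g. /4 selects shl, /5 shr, /7 sar).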
232 :
233 : // Partial Constant Pool
234 : // Unlike a complete constant pool (as used on ARM), a partial constant pool
235 : // only takes effect for shareable constants, in order to reduce code size.
236 : // A partial constant pool does not emit constant pool entries at the end of
237 : // each code object. Instead, it keeps the first shareable constant inlined in
238 : // the instructions and uses rip-relative memory loads for the same constants
239 : // in subsequent instructions. These rip-relative loads target the position of
240 : // the first inlined constant. For example:
241 : //
242 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
243 : // …
244 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
245 : // …
246 : //
247 : // turns into
248 : //
249 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
250 : // …
251 : // REX.W movq r10,[rip+0xffffff96] ; 7 bytes
252 : // …
253 :
254 : class ConstPool {
255 : public:
256 6478346 : explicit ConstPool(Assembler* assm) : assm_(assm) {}
257 : // Returns true when partial constant pool is valid for this entry.
258 : bool TryRecordEntry(intptr_t data, RelocInfo::Mode mode);
259 : bool IsEmpty() const { return entries_.empty(); }
260 :
261 : void PatchEntries();
262 : // Discard any pending pool entries.
263 : void Clear();
264 :
265 : private:
266 : // Adds a shared entry to entries_. Returns true if this is not the first time
267 : // we add this entry, false otherwise.
268 : bool AddSharedEntry(uint64_t data, int offset);
269 :
270 : // Check if the instruction is a rip-relative move.
271 : bool IsMoveRipRelative(byte* instr);
272 :
273 : Assembler* assm_;
274 :
275 : // Values, pc offsets of entries.
276 : typedef std::multimap<uint64_t, int> EntryMap;
277 : EntryMap entries_;
278 :
279 : // Number of bytes taken up by the displacement of rip-relative addressing.
280 : static constexpr int kRipRelativeDispSize = 4; // 32-bit displacement.
281 : // Distance between the address of the displacement in the rip-relative move
282 : // instruction and the start address of the instruction.
283 : static constexpr int kMoveRipRelativeDispOffset =
284 : 3; // REX Opcode ModRM Displacement
285 : // Distance between the address of the imm64 in the 'movq reg, imm64'
286 : // instruction and the start address of the instruction.
287 : static constexpr int kMoveImm64Offset = 2; // REX Opcode imm64
288 : // A mask for rip-relative move instruction.
289 : static constexpr uint32_t kMoveRipRelativeMask = 0x00C7FFFB;
290 : // The bits for a rip-relative move instruction after mask.
291 : static constexpr uint32_t kMoveRipRelativeInstr = 0x00058B48;
292 : };
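// A minimal sketch of how the mask constants above can be used to recognize a
// rip-relative move (an assumption about the out-of-line definition, shown
// here only for illustration):
//
//   bool ConstPool::IsMoveRipRelative(byte* instr) {
//     uint32_t head;
//     memcpy(&head, instr, sizeof(head));  // REX, opcode, ModRM, first disp byte
//     return (head & kMoveRipRelativeMask) == kMoveRipRelativeInstr;
//   }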
293 :
294 : class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
295 : private:
296 : // We check before assembling an instruction that there is sufficient
297 : // space to write an instruction and its relocation information.
298 : // The relocation writer's position must be kGap bytes above the end of
299 : // the generated instructions. This leaves enough space for the
300 : // longest possible x64 instruction, 15 bytes, and the longest possible
301 : // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
302 : // (There is a 15 byte limit on x64 instruction length that rules out some
303 : // otherwise valid instructions.)
304 : // This allows for a single, fast space check per instruction.
305 : static constexpr int kGap = 32;
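// A sketch of the single space check described above (assumed shape only; the
// real check lives outside this header):
//
//   // before emitting an instruction:
//   if (buffer_space() <= kGap) GrowBuffer();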
306 :
307 : public:
308 : // Create an assembler. Instructions and relocation information are emitted
309 : // into a buffer, with the instructions starting from the beginning and the
310 : // relocation information starting from the end of the buffer. See CodeDesc
311 : // for a detailed comment on the layout (globals.h).
312 : //
313 : // If the provided buffer is nullptr, the assembler allocates and grows its
314 : // own buffer. Otherwise it takes ownership of the provided buffer.
315 : explicit Assembler(const AssemblerOptions&,
316 : std::unique_ptr<AssemblerBuffer> = {});
317 12956988 : ~Assembler() override = default;
318 :
319 : // GetCode emits any pending (non-emitted) code and fills the descriptor
320 : // desc. GetCode() is idempotent; it returns the same result if no other
321 : // Assembler functions are invoked in between GetCode() calls.
322 : void GetCode(Isolate* isolate, CodeDesc* desc);
323 :
324 : // Read/Modify the code target in the relative branch/call instruction at pc.
325 : // On the x64 architecture, we use relative jumps with a 32-bit displacement
326 : // to jump to other Code objects in the Code space in the heap.
327 : // Jumps to C functions are done indirectly through a 64-bit register holding
328 : // the absolute address of the target.
329 : // These functions convert between absolute Addresses of Code objects and
330 : // the relative displacements stored in the code.
331 : // The isolate argument is unused (and may be nullptr) when skipping flushing.
332 : static inline Address target_address_at(Address pc, Address constant_pool);
333 : static inline void set_target_address_at(
334 : Address pc, Address constant_pool, Address target,
335 : ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
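// A sketch of the displacement <-> address conversion described above,
// assuming pc points at the 32-bit displacement field and the displacement is
// relative to the end of that field (illustration only; the inline
// definitions elsewhere are authoritative):
//
//   int32_t disp;
//   memcpy(&disp, reinterpret_cast<void*>(pc), sizeof(disp));
//   Address target = pc + sizeof(disp) + disp;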
336 :
337 : // Return the code target address at a call site from the return address
338 : // of that call in the instruction stream.
339 : static inline Address target_address_from_return_address(Address pc);
340 :
341 : // This sets the branch destination (which is in the instruction on x64).
342 : // This is for calls and branches within generated code.
343 : inline static void deserialization_set_special_target_at(
344 : Address instruction_payload, Code code, Address target);
345 :
346 : // Get the size of the special target encoded at 'instruction_payload'.
347 : inline static int deserialization_special_target_size(
348 : Address instruction_payload);
349 :
350 : // This sets the internal reference at the pc.
351 : inline static void deserialization_set_target_internal_reference_at(
352 : Address pc, Address target,
353 : RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
354 :
355 : inline Handle<Code> code_target_object_handle_at(Address pc);
356 : inline Address runtime_entry_at(Address pc);
357 :
358 : // Number of bytes taken up by the branch target in the code.
359 : static constexpr int kSpecialTargetSize = 4; // 32-bit displacement.
360 : // Distance between the address of the code target in the call instruction
361 : // and the return address pushed on the stack.
362 : static constexpr int kCallTargetAddressOffset = 4; // 32-bit displacement.
363 :
364 : // One byte opcode for test eax,0xXXXXXXXX.
365 : static constexpr byte kTestEaxByte = 0xA9;
366 : // One byte opcode for test al, 0xXX.
367 : static constexpr byte kTestAlByte = 0xA8;
368 : // One byte opcode for nop.
369 : static constexpr byte kNopByte = 0x90;
370 :
371 : // One byte prefix for a short conditional jump.
372 : static constexpr byte kJccShortPrefix = 0x70;
373 : static constexpr byte kJncShortOpcode = kJccShortPrefix | not_carry;
374 : static constexpr byte kJcShortOpcode = kJccShortPrefix | carry;
375 : static constexpr byte kJnzShortOpcode = kJccShortPrefix | not_zero;
376 : static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;
377 :
378 : // VEX prefix encodings.
379 : enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
380 : enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
381 : enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
382 : enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
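// Added note: these enums correspond to the VEX prefix fields pp (implied
// SIMD prefix 66/F3/F2), L (vector length), W, and the implied leading opcode
// bytes (0F / 0F 38 / 0F 3A); they are combined by vinstr() below.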
383 :
384 : // ---------------------------------------------------------------------------
385 : // Code generation
386 : //
387 : // Function names correspond one-to-one to x64 instruction mnemonics.
388 : // Unless specified otherwise, instructions operate on 64-bit operands.
389 : //
390 : // If we need versions of an assembly instruction that operate on different
391 : // width arguments, we add a single-letter suffix specifying the width.
392 : // This is done for the following instructions: mov, cmp, inc, dec,
393 : // add, sub, and test.
394 : // There are no versions of these instructions without the suffix.
395 : // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
396 : // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
397 : // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
398 : // - Instructions on 64-bit (quadword) operands/registers use 'q'.
399 : // - Instructions on operands/registers with pointer size use 'p'.
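// Illustrative calls showing the width suffixes (assuming an Assembler
// instance 'masm' and registers from src/x64/register-x64.h; examples only):
//
//   masm.movq(rax, Immediate(1));   // 64-bit (quadword) operands
//   masm.movl(rax, Immediate(1));   // 32-bit (doubleword) operands
//   masm.cmpw(rax, Immediate(0));   // 16-bit (word) operands
//   masm.cmpb(rax, Immediate(0));   // 8-bit (byte) operands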
400 :
401 : STATIC_ASSERT(kSystemPointerSize == kInt64Size ||
402 : kSystemPointerSize == kInt32Size);
403 :
404 : #define DECLARE_INSTRUCTION(instruction) \
405 : template <class P1> \
406 : void instruction##p(P1 p1) { \
407 : emit_##instruction(p1, kSystemPointerSize); \
408 : } \
409 : \
410 : template <class P1> \
411 : void instruction##_tagged(P1 p1) { \
412 : STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
413 : /* TODO(ishell): change to kTaggedSize */ \
414 : emit_##instruction(p1, COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
415 : } \
416 : \
417 : template <class P1> \
418 : void instruction##l(P1 p1) { \
419 : emit_##instruction(p1, kInt32Size); \
420 : } \
421 : \
422 : template <class P1> \
423 : void instruction##q(P1 p1) { \
424 : emit_##instruction(p1, kInt64Size); \
425 : } \
426 : \
427 : template <class P1, class P2> \
428 : void instruction##p(P1 p1, P2 p2) { \
429 : emit_##instruction(p1, p2, kSystemPointerSize); \
430 : } \
431 : \
432 : template <class P1, class P2> \
433 : void instruction##_tagged(P1 p1, P2 p2) { \
434 : STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
435 : /* TODO(ishell): change to kTaggedSize */ \
436 : emit_##instruction(p1, p2, \
437 : COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
438 : } \
439 : \
440 : template <class P1, class P2> \
441 : void instruction##l(P1 p1, P2 p2) { \
442 : emit_##instruction(p1, p2, kInt32Size); \
443 : } \
444 : \
445 : template <class P1, class P2> \
446 : void instruction##q(P1 p1, P2 p2) { \
447 : emit_##instruction(p1, p2, kInt64Size); \
448 : } \
449 : \
450 : template <class P1, class P2, class P3> \
451 : void instruction##p(P1 p1, P2 p2, P3 p3) { \
452 : emit_##instruction(p1, p2, p3, kSystemPointerSize); \
453 : } \
454 : \
455 : template <class P1, class P2, class P3> \
456 : void instruction##_tagged(P1 p1, P2 p2, P3 p3) { \
457 : STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
458 : /* TODO(ishell): change to kTaggedSize */ \
459 : emit_##instruction(p1, p2, p3, \
460 : COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
461 : } \
462 : \
463 : template <class P1, class P2, class P3> \
464 : void instruction##l(P1 p1, P2 p2, P3 p3) { \
465 : emit_##instruction(p1, p2, p3, kInt32Size); \
466 : } \
467 : \
468 : template <class P1, class P2, class P3> \
469 : void instruction##q(P1 p1, P2 p2, P3 p3) { \
470 : emit_##instruction(p1, p2, p3, kInt64Size); \
471 : }
472 109010528 : ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
473 : #undef DECLARE_INSTRUCTION
474 :
475 : // Insert the smallest number of nop instructions
476 : // possible to align the pc offset to a multiple
477 : // of m, where m must be a power of 2.
478 : void Align(int m);
479 : // Insert the smallest number of zero bytes possible to align the pc offset
480 : // to a multiple of m. m must be a power of 2 (>= 2).
481 : void DataAlign(int m);
482 : void Nop(int bytes = 1);
483 : // Aligns code to a jump-target boundary that is optimal for the platform.
484 : void CodeTargetAlign();
485 :
486 : // Stack
487 : void pushfq();
488 : void popfq();
489 :
490 : void pushq(Immediate value);
491 : // Push a 32-bit integer, and guarantee that it is actually pushed as a
492 : // 32-bit value; the normal push will optimize the 8-bit case.
493 : void pushq_imm32(int32_t imm32);
494 : void pushq(Register src);
495 : void pushq(Operand src);
496 :
497 : void popq(Register dst);
498 : void popq(Operand dst);
499 :
500 : void enter(Immediate size);
501 : void leave();
502 :
503 : // Moves
504 : void movb(Register dst, Operand src);
505 : void movb(Register dst, Immediate imm);
506 : void movb(Operand dst, Register src);
507 : void movb(Operand dst, Immediate imm);
508 :
509 : // Move the low 16 bits of a 64-bit register value to or from a 16-bit
510 : // memory location.
511 : void movw(Register dst, Operand src);
512 : void movw(Operand dst, Register src);
513 : void movw(Operand dst, Immediate imm);
514 :
515 : // Move the offset of the label location relative to the current
516 : // position (after the move) to the destination.
517 : void movl(Operand dst, Label* src);
518 :
519 : // Loads a pointer into a register with a relocation mode.
520 : void movp(Register dst, Address ptr, RelocInfo::Mode rmode);
521 :
522 : // Load a heap number into a register.
523 : // The heap number will not be allocated and embedded into the code right
524 : // away. Instead, we emit the load of a dummy object. Later, when calling
525 : // Assembler::GetCode, the heap number will be allocated and the code will be
526 : // patched by replacing the dummy with the actual object. The RelocInfo for
527 : // the embedded object is already recorded correctly when emitting the dummy
528 : // move.
529 : void movp_heap_number(Register dst, double value);
530 :
531 : void movp_string(Register dst, const StringConstantBase* str);
532 :
533 : // Loads a 64-bit immediate into a register.
534 : void movq(Register dst, int64_t value,
535 : RelocInfo::Mode rmode = RelocInfo::NONE);
536 : void movq(Register dst, uint64_t value,
537 : RelocInfo::Mode rmode = RelocInfo::NONE);
538 :
539 : void movsxbl(Register dst, Register src);
540 : void movsxbl(Register dst, Operand src);
541 : void movsxbq(Register dst, Register src);
542 : void movsxbq(Register dst, Operand src);
543 : void movsxwl(Register dst, Register src);
544 : void movsxwl(Register dst, Operand src);
545 : void movsxwq(Register dst, Register src);
546 : void movsxwq(Register dst, Operand src);
547 : void movsxlq(Register dst, Register src);
548 : void movsxlq(Register dst, Operand src);
549 :
550 : // Repeated moves.
551 :
552 : void repmovsb();
553 : void repmovsw();
554 : void repmovsp() { emit_repmovs(kSystemPointerSize); }
555 : void repmovsl() { emit_repmovs(kInt32Size); }
556 : void repmovsq() { emit_repmovs(kInt64Size); }
557 :
558 : // Instruction to load from an immediate 64-bit pointer into RAX.
559 : void load_rax(Address value, RelocInfo::Mode rmode);
560 : void load_rax(ExternalReference ext);
561 :
562 : // Conditional moves.
563 : void cmovq(Condition cc, Register dst, Register src);
564 : void cmovq(Condition cc, Register dst, Operand src);
565 : void cmovl(Condition cc, Register dst, Register src);
566 : void cmovl(Condition cc, Register dst, Operand src);
567 :
568 : void cmpb(Register dst, Immediate src) {
569 7990 : immediate_arithmetic_op_8(0x7, dst, src);
570 : }
571 :
572 : void cmpb_al(Immediate src);
573 :
574 : void cmpb(Register dst, Register src) {
575 3933 : arithmetic_op_8(0x3A, dst, src);
576 : }
577 :
578 564 : void cmpb(Register dst, Operand src) { arithmetic_op_8(0x3A, dst, src); }
579 :
580 460 : void cmpb(Operand dst, Register src) { arithmetic_op_8(0x38, src, dst); }
581 :
582 : void cmpb(Operand dst, Immediate src) {
583 16648 : immediate_arithmetic_op_8(0x7, dst, src);
584 : }
585 :
586 : void cmpw(Operand dst, Immediate src) {
587 272278 : immediate_arithmetic_op_16(0x7, dst, src);
588 : }
589 :
590 : void cmpw(Register dst, Immediate src) {
591 154866 : immediate_arithmetic_op_16(0x7, dst, src);
592 : }
593 :
594 75 : void cmpw(Register dst, Operand src) { arithmetic_op_16(0x3B, dst, src); }
595 :
596 : void cmpw(Register dst, Register src) {
597 448 : arithmetic_op_16(0x3B, dst, src);
598 : }
599 :
600 455 : void cmpw(Operand dst, Register src) { arithmetic_op_16(0x39, src, dst); }
601 :
602 0 : void testb(Register reg, Operand op) { testb(op, reg); }
603 :
604 0 : void testw(Register reg, Operand op) { testw(op, reg); }
605 :
606 : void andb(Register dst, Immediate src) {
607 : immediate_arithmetic_op_8(0x4, dst, src);
608 : }
609 :
610 : void decb(Register dst);
611 : void decb(Operand dst);
612 :
613 : // Lock prefix.
614 : void lock();
615 :
616 : void xchgb(Register reg, Operand op);
617 : void xchgw(Register reg, Operand op);
618 :
619 : void cmpxchgb(Operand dst, Register src);
620 : void cmpxchgw(Operand dst, Register src);
621 :
622 : // Sign-extends rax into rdx:rax.
623 : void cqo();
624 : // Sign-extends eax into edx:eax.
625 : void cdq();
626 :
627 : // Multiply eax by src, put the result in edx:eax.
628 : void mull(Register src);
629 : void mull(Operand src);
630 : // Multiply rax by src, put the result in rdx:rax.
631 : void mulq(Register src);
632 :
633 : #define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
634 : void instruction##p(Register dst, Immediate imm8) { \
635 : shift(dst, imm8, subcode, kSystemPointerSize); \
636 : } \
637 : \
638 : void instruction##l(Register dst, Immediate imm8) { \
639 : shift(dst, imm8, subcode, kInt32Size); \
640 : } \
641 : \
642 : void instruction##q(Register dst, Immediate imm8) { \
643 : shift(dst, imm8, subcode, kInt64Size); \
644 : } \
645 : \
646 : void instruction##p(Operand dst, Immediate imm8) { \
647 : shift(dst, imm8, subcode, kSystemPointerSize); \
648 : } \
649 : \
650 : void instruction##l(Operand dst, Immediate imm8) { \
651 : shift(dst, imm8, subcode, kInt32Size); \
652 : } \
653 : \
654 : void instruction##q(Operand dst, Immediate imm8) { \
655 : shift(dst, imm8, subcode, kInt64Size); \
656 : } \
657 : \
658 : void instruction##p_cl(Register dst) { \
659 : shift(dst, subcode, kSystemPointerSize); \
660 : } \
661 : \
662 : void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
663 : \
664 : void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
665 : \
666 : void instruction##p_cl(Operand dst) { \
667 : shift(dst, subcode, kSystemPointerSize); \
668 : } \
669 : \
670 : void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
671 : \
672 : void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
673 1478501 : SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
674 : #undef DECLARE_SHIFT_INSTRUCTION
675 :
676 : // Shifts dst:src left by cl bits, affecting only dst.
677 : void shld(Register dst, Register src);
678 :
679 : // Shifts src:dst right by cl bits, affecting only dst.
680 : void shrd(Register dst, Register src);
681 :
682 : void store_rax(Address dst, RelocInfo::Mode mode);
683 : void store_rax(ExternalReference ref);
684 :
685 : void subb(Register dst, Immediate src) {
686 2912 : immediate_arithmetic_op_8(0x5, dst, src);
687 : }
688 :
689 : void sub_sp_32(uint32_t imm);
690 :
691 : void testb(Register dst, Register src);
692 : void testb(Register reg, Immediate mask);
693 : void testb(Operand op, Immediate mask);
694 : void testb(Operand op, Register reg);
695 :
696 : void testw(Register dst, Register src);
697 : void testw(Register reg, Immediate mask);
698 : void testw(Operand op, Immediate mask);
699 : void testw(Operand op, Register reg);
700 :
701 : // Bit operations.
702 : void bswapl(Register dst);
703 : void bswapq(Register dst);
704 : void btq(Operand dst, Register src);
705 : void btsq(Operand dst, Register src);
706 : void btsq(Register dst, Immediate imm8);
707 : void btrq(Register dst, Immediate imm8);
708 : void bsrq(Register dst, Register src);
709 : void bsrq(Register dst, Operand src);
710 : void bsrl(Register dst, Register src);
711 : void bsrl(Register dst, Operand src);
712 : void bsfq(Register dst, Register src);
713 : void bsfq(Register dst, Operand src);
714 : void bsfl(Register dst, Register src);
715 : void bsfl(Register dst, Operand src);
716 :
717 : // Miscellaneous
718 : void clc();
719 : void cld();
720 : void cpuid();
721 : void hlt();
722 : void int3();
723 : void nop();
724 : void ret(int imm16);
725 : void ud2();
726 : void setcc(Condition cc, Register reg);
727 :
728 : void pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
729 : void pshufw(XMMRegister dst, Operand src, uint8_t shuffle);
730 : void pblendw(XMMRegister dst, Operand src, uint8_t mask);
731 : void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask);
732 : void palignr(XMMRegister dst, Operand src, uint8_t mask);
733 : void palignr(XMMRegister dst, XMMRegister src, uint8_t mask);
734 :
735 : // Label operations & relative jumps (PPUM Appendix D)
736 : //
737 : // Takes a branch opcode (cc) and a label (L) and generates
738 : // either a backward branch or a forward branch and links it
739 : // to the label fixup chain. Usage:
740 : //
741 : // Label L; // unbound label
742 : // j(cc, &L); // forward branch to unbound label
743 : // bind(&L); // bind label to the current pc
744 : // j(cc, &L); // backward branch to bound label
745 : // bind(&L); // illegal: a label may be bound only once
746 : //
747 : // Note: The same Label can be used for forward and backward branches
748 : // but it may be bound only once.
749 :
750 : void bind(Label* L); // binds an unbound label L to the current code position
751 :
752 : // Calls
753 : // Call near relative 32-bit displacement, relative to next instruction.
754 : void call(Label* L);
755 : void call(Address entry, RelocInfo::Mode rmode);
756 : void near_call(Address entry, RelocInfo::Mode rmode);
757 : void near_jmp(Address entry, RelocInfo::Mode rmode);
758 : void call(Handle<Code> target,
759 : RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
760 :
761 : // Calls directly to the given address using a relative offset.
762 : // Should only ever be used in Code objects for calls within the
763 : // same Code object. Should not be used when generating new code (use labels),
764 : // but only when patching existing code.
765 : void call(Address target);
766 :
767 : // Call near absolute indirect, address in register
768 : void call(Register adr);
769 :
770 : // Jumps
771 : // Jump short or near relative.
772 : // Use a 32-bit signed displacement.
773 : // Unconditional jump to L
774 : void jmp(Label* L, Label::Distance distance = Label::kFar);
775 : void jmp(Handle<Code> target, RelocInfo::Mode rmode);
776 :
777 : // Jump near absolute indirect (r64)
778 : void jmp(Register adr);
779 : void jmp(Operand src);
780 :
781 : // Conditional jumps
782 : void j(Condition cc,
783 : Label* L,
784 : Label::Distance distance = Label::kFar);
785 : void j(Condition cc, Address entry, RelocInfo::Mode rmode);
786 : void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
787 :
788 : // Floating-point operations
789 : void fld(int i);
790 :
791 : void fld1();
792 : void fldz();
793 : void fldpi();
794 : void fldln2();
795 :
796 : void fld_s(Operand adr);
797 : void fld_d(Operand adr);
798 :
799 : void fstp_s(Operand adr);
800 : void fstp_d(Operand adr);
801 : void fstp(int index);
802 :
803 : void fild_s(Operand adr);
804 : void fild_d(Operand adr);
805 :
806 : void fist_s(Operand adr);
807 :
808 : void fistp_s(Operand adr);
809 : void fistp_d(Operand adr);
810 :
811 : void fisttp_s(Operand adr);
812 : void fisttp_d(Operand adr);
813 :
814 : void fabs();
815 : void fchs();
816 :
817 : void fadd(int i);
818 : void fsub(int i);
819 : void fmul(int i);
820 : void fdiv(int i);
821 :
822 : void fisub_s(Operand adr);
823 :
824 : void faddp(int i = 1);
825 : void fsubp(int i = 1);
826 : void fsubrp(int i = 1);
827 : void fmulp(int i = 1);
828 : void fdivp(int i = 1);
829 : void fprem();
830 : void fprem1();
831 :
832 : void fxch(int i = 1);
833 : void fincstp();
834 : void ffree(int i = 0);
835 :
836 : void ftst();
837 : void fucomp(int i);
838 : void fucompp();
839 : void fucomi(int i);
840 : void fucomip();
841 :
842 : void fcompp();
843 : void fnstsw_ax();
844 : void fwait();
845 : void fnclex();
846 :
847 : void fsin();
848 : void fcos();
849 : void fptan();
850 : void fyl2x();
851 : void f2xm1();
852 : void fscale();
853 : void fninit();
854 :
855 : void frndint();
856 :
857 : void sahf();
858 :
859 : // SSE instructions
860 : void addss(XMMRegister dst, XMMRegister src);
861 : void addss(XMMRegister dst, Operand src);
862 : void subss(XMMRegister dst, XMMRegister src);
863 : void subss(XMMRegister dst, Operand src);
864 : void mulss(XMMRegister dst, XMMRegister src);
865 : void mulss(XMMRegister dst, Operand src);
866 : void divss(XMMRegister dst, XMMRegister src);
867 : void divss(XMMRegister dst, Operand src);
868 :
869 : void maxss(XMMRegister dst, XMMRegister src);
870 : void maxss(XMMRegister dst, Operand src);
871 : void minss(XMMRegister dst, XMMRegister src);
872 : void minss(XMMRegister dst, Operand src);
873 :
874 : void sqrtss(XMMRegister dst, XMMRegister src);
875 : void sqrtss(XMMRegister dst, Operand src);
876 :
877 : void ucomiss(XMMRegister dst, XMMRegister src);
878 : void ucomiss(XMMRegister dst, Operand src);
879 : void movaps(XMMRegister dst, XMMRegister src);
880 :
881 : // Don't use this unless it's important to keep the
882 : // top half of the destination register unchanged.
883 : // Use movaps when moving float values and movd for integer
884 : // values in xmm registers.
885 : void movss(XMMRegister dst, XMMRegister src);
886 :
887 : void movss(XMMRegister dst, Operand src);
888 : void movss(Operand dst, XMMRegister src);
889 : void shufps(XMMRegister dst, XMMRegister src, byte imm8);
890 :
891 : void cvttss2si(Register dst, Operand src);
892 : void cvttss2si(Register dst, XMMRegister src);
893 : void cvtlsi2ss(XMMRegister dst, Operand src);
894 : void cvtlsi2ss(XMMRegister dst, Register src);
895 :
896 : void andps(XMMRegister dst, XMMRegister src);
897 : void andps(XMMRegister dst, Operand src);
898 : void orps(XMMRegister dst, XMMRegister src);
899 : void orps(XMMRegister dst, Operand src);
900 : void xorps(XMMRegister dst, XMMRegister src);
901 : void xorps(XMMRegister dst, Operand src);
902 :
903 : void addps(XMMRegister dst, XMMRegister src);
904 : void addps(XMMRegister dst, Operand src);
905 : void subps(XMMRegister dst, XMMRegister src);
906 : void subps(XMMRegister dst, Operand src);
907 : void mulps(XMMRegister dst, XMMRegister src);
908 : void mulps(XMMRegister dst, Operand src);
909 : void divps(XMMRegister dst, XMMRegister src);
910 : void divps(XMMRegister dst, Operand src);
911 :
912 : void movmskps(Register dst, XMMRegister src);
913 :
914 : void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
915 : SIMDPrefix pp, LeadingOpcode m, VexW w);
916 : void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
917 : SIMDPrefix pp, LeadingOpcode m, VexW w);
918 :
919 : // SSE2 instructions
920 : void sse2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape,
921 : byte opcode);
922 : void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
923 : byte opcode);
924 : #define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
925 : void instruction(XMMRegister dst, XMMRegister src) { \
926 : sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
927 : } \
928 : void instruction(XMMRegister dst, Operand src) { \
929 : sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
930 : }
931 :
932 10525 : SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
933 : #undef DECLARE_SSE2_INSTRUCTION
934 :
935 : #define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
936 : void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
937 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
938 : } \
939 : void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
940 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
941 : }
942 :
943 303747 : SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
944 : #undef DECLARE_SSE2_AVX_INSTRUCTION
945 :
946 : // SSE3
947 : void lddqu(XMMRegister dst, Operand src);
948 :
949 : // SSSE3
950 : void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
951 : byte escape2, byte opcode);
952 : void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
953 : byte escape2, byte opcode);
954 :
955 : #define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
956 : opcode) \
957 : void instruction(XMMRegister dst, XMMRegister src) { \
958 : ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
959 : } \
960 : void instruction(XMMRegister dst, Operand src) { \
961 : ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
962 : }
963 :
964 6820 : SSSE3_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
965 : #undef DECLARE_SSSE3_INSTRUCTION
966 :
967 : // SSE4
968 : void sse4_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
969 : byte escape2, byte opcode);
970 : void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
971 : byte escape2, byte opcode);
972 : #define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
973 : opcode) \
974 : void instruction(XMMRegister dst, XMMRegister src) { \
975 : sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
976 : } \
977 : void instruction(XMMRegister dst, Operand src) { \
978 : sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
979 : }
980 :
981 1410 : SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
982 : #undef DECLARE_SSE4_INSTRUCTION
983 :
984 : #define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
985 : opcode) \
986 : void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
987 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
988 : } \
989 : void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
990 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
991 : }
992 :
993 90 : SSSE3_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
994 150 : SSE4_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
995 : #undef DECLARE_SSE34_AVX_INSTRUCTION
996 :
997 : void movd(XMMRegister dst, Register src);
998 : void movd(XMMRegister dst, Operand src);
999 : void movd(Register dst, XMMRegister src);
1000 : void movq(XMMRegister dst, Register src);
1001 : void movq(Register dst, XMMRegister src);
1002 : void movq(XMMRegister dst, XMMRegister src);
1003 :
1004 : // Don't use this unless it's important to keep the
1005 : // top half of the destination register unchanged.
1006 : // Use movapd when moving double values and movq for integer
1007 : // values in xmm registers.
1008 : void movsd(XMMRegister dst, XMMRegister src);
1009 :
1010 : void movsd(Operand dst, XMMRegister src);
1011 : void movsd(XMMRegister dst, Operand src);
1012 :
1013 : void movdqa(Operand dst, XMMRegister src);
1014 : void movdqa(XMMRegister dst, Operand src);
1015 :
1016 : void movdqu(Operand dst, XMMRegister src);
1017 : void movdqu(XMMRegister dst, Operand src);
1018 :
1019 : void movapd(XMMRegister dst, XMMRegister src);
1020 : void movupd(XMMRegister dst, Operand src);
1021 : void movupd(Operand dst, XMMRegister src);
1022 :
1023 : void psllq(XMMRegister reg, byte imm8);
1024 : void psrlq(XMMRegister reg, byte imm8);
1025 : void psllw(XMMRegister reg, byte imm8);
1026 : void pslld(XMMRegister reg, byte imm8);
1027 : void psrlw(XMMRegister reg, byte imm8);
1028 : void psrld(XMMRegister reg, byte imm8);
1029 : void psraw(XMMRegister reg, byte imm8);
1030 : void psrad(XMMRegister reg, byte imm8);
1031 :
1032 : void cvttsd2si(Register dst, Operand src);
1033 : void cvttsd2si(Register dst, XMMRegister src);
1034 : void cvttss2siq(Register dst, XMMRegister src);
1035 : void cvttss2siq(Register dst, Operand src);
1036 : void cvttsd2siq(Register dst, XMMRegister src);
1037 : void cvttsd2siq(Register dst, Operand src);
1038 : void cvttps2dq(XMMRegister dst, Operand src);
1039 : void cvttps2dq(XMMRegister dst, XMMRegister src);
1040 :
1041 : void cvtlsi2sd(XMMRegister dst, Operand src);
1042 : void cvtlsi2sd(XMMRegister dst, Register src);
1043 :
1044 : void cvtqsi2ss(XMMRegister dst, Operand src);
1045 : void cvtqsi2ss(XMMRegister dst, Register src);
1046 :
1047 : void cvtqsi2sd(XMMRegister dst, Operand src);
1048 : void cvtqsi2sd(XMMRegister dst, Register src);
1049 :
1050 :
1051 : void cvtss2sd(XMMRegister dst, XMMRegister src);
1052 : void cvtss2sd(XMMRegister dst, Operand src);
1053 : void cvtsd2ss(XMMRegister dst, XMMRegister src);
1054 : void cvtsd2ss(XMMRegister dst, Operand src);
1055 :
1056 : void cvtsd2si(Register dst, XMMRegister src);
1057 : void cvtsd2siq(Register dst, XMMRegister src);
1058 :
1059 : void addsd(XMMRegister dst, XMMRegister src);
1060 : void addsd(XMMRegister dst, Operand src);
1061 : void subsd(XMMRegister dst, XMMRegister src);
1062 : void subsd(XMMRegister dst, Operand src);
1063 : void mulsd(XMMRegister dst, XMMRegister src);
1064 : void mulsd(XMMRegister dst, Operand src);
1065 : void divsd(XMMRegister dst, XMMRegister src);
1066 : void divsd(XMMRegister dst, Operand src);
1067 :
1068 : void maxsd(XMMRegister dst, XMMRegister src);
1069 : void maxsd(XMMRegister dst, Operand src);
1070 : void minsd(XMMRegister dst, XMMRegister src);
1071 : void minsd(XMMRegister dst, Operand src);
1072 :
1073 : void andpd(XMMRegister dst, XMMRegister src);
1074 : void andpd(XMMRegister dst, Operand src);
1075 : void orpd(XMMRegister dst, XMMRegister src);
1076 : void orpd(XMMRegister dst, Operand src);
1077 : void xorpd(XMMRegister dst, XMMRegister src);
1078 : void xorpd(XMMRegister dst, Operand src);
1079 : void sqrtsd(XMMRegister dst, XMMRegister src);
1080 : void sqrtsd(XMMRegister dst, Operand src);
1081 :
1082 : void haddps(XMMRegister dst, XMMRegister src);
1083 : void haddps(XMMRegister dst, Operand src);
1084 :
1085 : void ucomisd(XMMRegister dst, XMMRegister src);
1086 : void ucomisd(XMMRegister dst, Operand src);
1087 : void cmpltsd(XMMRegister dst, XMMRegister src);
1088 :
1089 : void movmskpd(Register dst, XMMRegister src);
1090 :
1091 : // SSE 4.1 instructions
1092 : void insertps(XMMRegister dst, XMMRegister src, byte imm8);
1093 : void extractps(Register dst, XMMRegister src, byte imm8);
1094 : void pextrb(Register dst, XMMRegister src, int8_t imm8);
1095 : void pextrb(Operand dst, XMMRegister src, int8_t imm8);
1096 : void pextrw(Register dst, XMMRegister src, int8_t imm8);
1097 : void pextrw(Operand dst, XMMRegister src, int8_t imm8);
1098 : void pextrd(Register dst, XMMRegister src, int8_t imm8);
1099 : void pextrd(Operand dst, XMMRegister src, int8_t imm8);
1100 : void pinsrb(XMMRegister dst, Register src, int8_t imm8);
1101 : void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
1102 : void pinsrw(XMMRegister dst, Register src, int8_t imm8);
1103 : void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
1104 : void pinsrd(XMMRegister dst, Register src, int8_t imm8);
1105 : void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
1106 :
1107 : void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
1108 : void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
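// Added note: the RoundingMode argument selects the SSE4.1 rounding behaviour,
// e.g. roundsd(xmm0, xmm1, kRoundDown) rounds toward negative infinity and
// kRoundToZero truncates.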
1109 :
1110 : void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
1111 : void cmpps(XMMRegister dst, Operand src, int8_t cmp);
1112 : void cmppd(XMMRegister dst, XMMRegister src, int8_t cmp);
1113 : void cmppd(XMMRegister dst, Operand src, int8_t cmp);
1114 :
1115 : #define SSE_CMP_P(instr, imm8) \
1116 : void instr##ps(XMMRegister dst, XMMRegister src) { cmpps(dst, src, imm8); } \
1117 : void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \
1118 : void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
1119 : void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
1120 :
1121 30 : SSE_CMP_P(cmpeq, 0x0);
1122 40 : SSE_CMP_P(cmplt, 0x1);
1123 50 : SSE_CMP_P(cmple, 0x2);
1124 20 : SSE_CMP_P(cmpneq, 0x4);
1125 20 : SSE_CMP_P(cmpnlt, 0x5);
1126 20 : SSE_CMP_P(cmpnle, 0x6);
1127 :
1128 : #undef SSE_CMP_P
1129 :
1130 : void minps(XMMRegister dst, XMMRegister src);
1131 : void minps(XMMRegister dst, Operand src);
1132 : void maxps(XMMRegister dst, XMMRegister src);
1133 : void maxps(XMMRegister dst, Operand src);
1134 : void rcpps(XMMRegister dst, XMMRegister src);
1135 : void rcpps(XMMRegister dst, Operand src);
1136 : void rsqrtps(XMMRegister dst, XMMRegister src);
1137 : void rsqrtps(XMMRegister dst, Operand src);
1138 : void sqrtps(XMMRegister dst, XMMRegister src);
1139 : void sqrtps(XMMRegister dst, Operand src);
1140 : void movups(XMMRegister dst, XMMRegister src);
1141 : void movups(XMMRegister dst, Operand src);
1142 : void movups(Operand dst, XMMRegister src);
1143 : void psrldq(XMMRegister dst, uint8_t shift);
1144 : void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1145 : void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
1146 : void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1147 : void pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
1148 : void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1149 : void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
1150 : void cvtdq2ps(XMMRegister dst, XMMRegister src);
1151 : void cvtdq2ps(XMMRegister dst, Operand src);
1152 :
1153 : // AVX instructions
1154 : void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1155 15 : vfmasd(0x99, dst, src1, src2);
1156 : }
1157 : void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1158 15 : vfmasd(0xa9, dst, src1, src2);
1159 : }
1160 : void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1161 15 : vfmasd(0xb9, dst, src1, src2);
1162 : }
1163 : void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1164 15 : vfmasd(0x99, dst, src1, src2);
1165 : }
1166 : void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1167 15 : vfmasd(0xa9, dst, src1, src2);
1168 : }
1169 : void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1170 15 : vfmasd(0xb9, dst, src1, src2);
1171 : }
1172 : void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1173 10 : vfmasd(0x9b, dst, src1, src2);
1174 : }
1175 : void vfmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1176 10 : vfmasd(0xab, dst, src1, src2);
1177 : }
1178 : void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1179 10 : vfmasd(0xbb, dst, src1, src2);
1180 : }
1181 : void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1182 10 : vfmasd(0x9b, dst, src1, src2);
1183 : }
1184 : void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1185 10 : vfmasd(0xab, dst, src1, src2);
1186 : }
1187 : void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1188 10 : vfmasd(0xbb, dst, src1, src2);
1189 : }
1190 : void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1191 10 : vfmasd(0x9d, dst, src1, src2);
1192 : }
1193 : void vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1194 10 : vfmasd(0xad, dst, src1, src2);
1195 : }
1196 : void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1197 10 : vfmasd(0xbd, dst, src1, src2);
1198 : }
1199 : void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1200 10 : vfmasd(0x9d, dst, src1, src2);
1201 : }
1202 : void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1203 10 : vfmasd(0xad, dst, src1, src2);
1204 : }
1205 : void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1206 10 : vfmasd(0xbd, dst, src1, src2);
1207 : }
1208 : void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1209 10 : vfmasd(0x9f, dst, src1, src2);
1210 : }
1211 : void vfnmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1212 10 : vfmasd(0xaf, dst, src1, src2);
1213 : }
1214 : void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1215 10 : vfmasd(0xbf, dst, src1, src2);
1216 : }
1217 : void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1218 10 : vfmasd(0x9f, dst, src1, src2);
1219 : }
1220 : void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1221 10 : vfmasd(0xaf, dst, src1, src2);
1222 : }
1223 : void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1224 10 : vfmasd(0xbf, dst, src1, src2);
1225 : }
1226 : void vfmasd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1227 : void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1228 :
1229 : void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1230 10 : vfmass(0x99, dst, src1, src2);
1231 : }
1232 : void vfmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1233 10 : vfmass(0xa9, dst, src1, src2);
1234 : }
1235 : void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1236 10 : vfmass(0xb9, dst, src1, src2);
1237 : }
1238 : void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1239 10 : vfmass(0x99, dst, src1, src2);
1240 : }
1241 : void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1242 10 : vfmass(0xa9, dst, src1, src2);
1243 : }
1244 : void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1245 10 : vfmass(0xb9, dst, src1, src2);
1246 : }
1247 : void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1248 10 : vfmass(0x9b, dst, src1, src2);
1249 : }
1250 : void vfmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1251 10 : vfmass(0xab, dst, src1, src2);
1252 : }
1253 : void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1254 10 : vfmass(0xbb, dst, src1, src2);
1255 : }
1256 : void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1257 10 : vfmass(0x9b, dst, src1, src2);
1258 : }
1259 : void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1260 10 : vfmass(0xab, dst, src1, src2);
1261 : }
1262 : void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1263 10 : vfmass(0xbb, dst, src1, src2);
1264 : }
1265 : void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1266 10 : vfmass(0x9d, dst, src1, src2);
1267 : }
1268 : void vfnmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1269 10 : vfmass(0xad, dst, src1, src2);
1270 : }
1271 : void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1272 10 : vfmass(0xbd, dst, src1, src2);
1273 : }
1274 : void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1275 10 : vfmass(0x9d, dst, src1, src2);
1276 : }
1277 : void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1278 10 : vfmass(0xad, dst, src1, src2);
1279 : }
1280 : void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1281 10 : vfmass(0xbd, dst, src1, src2);
1282 : }
1283 : void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1284 10 : vfmass(0x9f, dst, src1, src2);
1285 : }
1286 : void vfnmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1287 10 : vfmass(0xaf, dst, src1, src2);
1288 : }
1289 : void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1290 10 : vfmass(0xbf, dst, src1, src2);
1291 : }
1292 : void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1293 10 : vfmass(0x9f, dst, src1, src2);
1294 : }
1295 : void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1296 10 : vfmass(0xaf, dst, src1, src2);
1297 : }
1298 : void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1299 10 : vfmass(0xbf, dst, src1, src2);
1300 : }
1301 : void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1302 : void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1303 :
1304 : void vmovd(XMMRegister dst, Register src);
1305 : void vmovd(XMMRegister dst, Operand src);
1306 : void vmovd(Register dst, XMMRegister src);
1307 : void vmovq(XMMRegister dst, Register src);
1308 : void vmovq(XMMRegister dst, Operand src);
1309 : void vmovq(Register dst, XMMRegister src);
1310 :
1311 117538 : void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1312 : vsd(0x10, dst, src1, src2);
1313 117539 : }
1314 3235593 : void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
1315 3571748 : void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
1316 :
1317 : #define AVX_SP_3(instr, opcode) \
1318 : AVX_S_3(instr, opcode) \
1319 : AVX_P_3(instr, opcode)
1320 :
1321 : #define AVX_S_3(instr, opcode) \
1322 : AVX_3(instr##ss, opcode, vss) \
1323 : AVX_3(instr##sd, opcode, vsd)
1324 :
1325 : #define AVX_P_3(instr, opcode) \
1326 : AVX_3(instr##ps, opcode, vps) \
1327 : AVX_3(instr##pd, opcode, vpd)
1328 :
1329 : #define AVX_3(instr, opcode, impl) \
1330 : void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1331 : impl(opcode, dst, src1, src2); \
1332 : } \
1333 : void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
1334 : impl(opcode, dst, src1, src2); \
1335 : }
1336 :
1337 1462 : AVX_SP_3(vsqrt, 0x51);
1338 165398 : AVX_SP_3(vadd, 0x58);
1339 47615 : AVX_SP_3(vsub, 0x5c);
1340 31652 : AVX_SP_3(vmul, 0x59);
1341 27938 : AVX_SP_3(vdiv, 0x5e);
1342 45 : AVX_SP_3(vmin, 0x5d);
1343 45 : AVX_SP_3(vmax, 0x5f);
1344 867 : AVX_P_3(vand, 0x54);
1345 15 : AVX_P_3(vor, 0x56);
1346 553786 : AVX_P_3(vxor, 0x57);
1347 41006 : AVX_3(vcvtsd2ss, 0x5a, vsd);
1348 20 : AVX_3(vhaddps, 0x7c, vsd);
1349 :
1350 : #undef AVX_3
1351 : #undef AVX_S_3
1352 : #undef AVX_P_3
1353 : #undef AVX_SP_3
1354 :
1355 : void vpsrlq(XMMRegister dst, XMMRegister src, byte imm8) {
1356 204064 : vpd(0x73, xmm2, dst, src);
1357 : emit(imm8);
1358 : }
1359 : void vpsllq(XMMRegister dst, XMMRegister src, byte imm8) {
1360 235626 : vpd(0x73, xmm6, dst, src);
1361 : emit(imm8);
1362 : }
1363 : void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1364 11485 : vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1365 : }
1366 : void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1367 12836 : vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1368 : }
1369 : void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
1370 394059 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1371 394059 : vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
1372 : }
1373 : void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1374 4178 : vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
1375 : }
1376 : void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
1377 1289 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1378 1289 : vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
1379 : }
1380 : void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1381 9 : vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
1382 : }
1383 : void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
1384 488 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1385 488 : vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
1386 : }
1387 : void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1388 0 : vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
1389 : }
1390 : void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
1391 20228 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1392 20228 : vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
1393 : }
1394 : void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1395 2091 : vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
1396 : }
1397 660 : void vcvttss2si(Register dst, XMMRegister src) {
1398 660 : XMMRegister idst = XMMRegister::from_code(dst.code());
1399 660 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1400 660 : }
1401 0 : void vcvttss2si(Register dst, Operand src) {
1402 0 : XMMRegister idst = XMMRegister::from_code(dst.code());
1403 0 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1404 0 : }
1405 108234 : void vcvttsd2si(Register dst, XMMRegister src) {
1406 108234 : XMMRegister idst = XMMRegister::from_code(dst.code());
1407 108234 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1408 108234 : }
1409 24059 : void vcvttsd2si(Register dst, Operand src) {
1410 24059 : XMMRegister idst = XMMRegister::from_code(dst.code());
1411 24059 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1412 24059 : }
1413 461 : void vcvttss2siq(Register dst, XMMRegister src) {
1414 461 : XMMRegister idst = XMMRegister::from_code(dst.code());
1415 461 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1416 461 : }
1417 0 : void vcvttss2siq(Register dst, Operand src) {
1418 0 : XMMRegister idst = XMMRegister::from_code(dst.code());
1419 0 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1420 0 : }
1421 65832 : void vcvttsd2siq(Register dst, XMMRegister src) {
1422 65832 : XMMRegister idst = XMMRegister::from_code(dst.code());
1423 65832 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1424 65858 : }
1425 11 : void vcvttsd2siq(Register dst, Operand src) {
1426 11 : XMMRegister idst = XMMRegister::from_code(dst.code());
1427 11 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1428 11 : }
1429 10 : void vcvtsd2si(Register dst, XMMRegister src) {
1430 10 : XMMRegister idst = XMMRegister::from_code(dst.code());
1431 10 : vinstr(0x2d, idst, xmm0, src, kF2, k0F, kW0);
1432 10 : }
1433 : void vucomisd(XMMRegister dst, XMMRegister src) {
1434 242750 : vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
1435 : }
1436 : void vucomisd(XMMRegister dst, Operand src) {
1437 24503 : vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
1438 : }
1439 820 : void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
1440 : RoundingMode mode) {
1441 820 : vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
1442 821 : emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
1443 821 : }
1444 44897 : void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
1445 : RoundingMode mode) {
1446 44897 : vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
1447 44904 : emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
1448 44904 : }
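// In the roundss/roundsd immediate, bits 0-1 select the rounding mode and
// bit 3 (0x8) masks the precision exception, hence the emitted byte is
// mode | 0x8. For example, assuming kRoundDown == 0x1 in RoundingMode,
// vroundsd(xmm1, xmm2, xmm3, kRoundDown) ends with the immediate byte 0x9.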
1449 :
1450 : void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1451 254616 : vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
1452 : }
1453 : void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
1454 3419548 : vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
1455 : }
1456 :
1457 : void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1458 108558 : vss(0x10, dst, src1, src2);
1459 : }
1460 29533 : void vmovss(XMMRegister dst, Operand src) { vss(0x10, dst, xmm0, src); }
1461 706058 : void vmovss(Operand dst, XMMRegister src) { vss(0x11, src, xmm0, dst); }
1462 : void vucomiss(XMMRegister dst, XMMRegister src);
1463 : void vucomiss(XMMRegister dst, Operand src);
1464 : void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1465 : void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1466 :
1467 426 : void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
1468 2100 : void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
1469 6641 : void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
1470 6658 : void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
1471 140948 : void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
1472 5 : void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
1473 5 : void vmovupd(Operand dst, XMMRegister src) { vpd(0x11, src, xmm0, dst); }
1474 : void vmovmskps(Register dst, XMMRegister src) {
1475 257 : XMMRegister idst = XMMRegister::from_code(dst.code());
1476 257 : vps(0x50, idst, xmm0, src);
1477 : }
1478 : void vmovmskpd(Register dst, XMMRegister src) {
1479 735 : XMMRegister idst = XMMRegister::from_code(dst.code());
1480 735 : vpd(0x50, idst, xmm0, src);
1481 : }
1482 : void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
1483 35 : vps(0xC2, dst, src1, src2);
1484 : emit(cmp);
1485 : }
1486 : void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
1487 35 : vps(0xC2, dst, src1, src2);
1488 : emit(cmp);
1489 : }
1490 : void vcmppd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
1491 35 : vpd(0xC2, dst, src1, src2);
1492 : emit(cmp);
1493 : }
1494 : void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
1495 35 : vpd(0xC2, dst, src1, src2);
1496 : emit(cmp);
1497 : }
1498 :
1499 : #define AVX_CMP_P(instr, imm8) \
1500 : void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1501 : vcmpps(dst, src1, src2, imm8); \
1502 : } \
1503 : void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
1504 : vcmpps(dst, src1, src2, imm8); \
1505 : } \
1506 : void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1507 : vcmppd(dst, src1, src2, imm8); \
1508 : } \
1509 : void instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
1510 : vcmppd(dst, src1, src2, imm8); \
1511 : }
1512 :
1513 40 : AVX_CMP_P(vcmpeq, 0x0);
1514 40 : AVX_CMP_P(vcmplt, 0x1);
1515 40 : AVX_CMP_P(vcmple, 0x2);
1516 40 : AVX_CMP_P(vcmpneq, 0x4);
1517 40 : AVX_CMP_P(vcmpnlt, 0x5);
1518 40 : AVX_CMP_P(vcmpnle, 0x6);
1519 :
1520 : #undef AVX_CMP_P
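// Each AVX_CMP_P line above expands to four thin wrappers (ps/pd, register
// and memory operands) around vcmpps/vcmppd with a fixed predicate, e.g.
// AVX_CMP_P(vcmpeq, 0x0) yields, among others:
//   void vcmpeqps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
//     vcmpps(dst, src1, src2, 0x0);
//   }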
1521 :
1522 : void vlddqu(XMMRegister dst, Operand src) {
1523 5 : vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
1524 : }
1525 5 : void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1526 5 : vinstr(0x71, xmm6, dst, src, k66, k0F, kWIG);
1527 : emit(imm8);
1528 5 : }
1529 5 : void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1530 5 : vinstr(0x71, xmm2, dst, src, k66, k0F, kWIG);
1531 : emit(imm8);
1532 5 : }
1533 5 : void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1534 5 : vinstr(0x71, xmm4, dst, src, k66, k0F, kWIG);
1535 : emit(imm8);
1536 5 : }
1537 55478 : void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1538 55478 : vinstr(0x72, xmm6, dst, src, k66, k0F, kWIG);
1539 : emit(imm8);
1540 55479 : }
1541 42066 : void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1542 42066 : vinstr(0x72, xmm2, dst, src, k66, k0F, kWIG);
1543 : emit(imm8);
1544 42067 : }
1545 5 : void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1546 5 : vinstr(0x72, xmm4, dst, src, k66, k0F, kWIG);
1547 : emit(imm8);
1548 5 : }
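// For these shift-by-immediate forms (opcodes 0x71/0x72) the ModR/M reg
// field holds an opcode extension rather than a register: /2 = logical
// right shift, /4 = arithmetic right shift, /6 = left shift. Passing
// xmm2, xmm4 or xmm6 as the first vinstr operand encodes that extension.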
1549 5 : void vpextrb(Register dst, XMMRegister src, uint8_t imm8) {
1550 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1551 5 : vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
1552 : emit(imm8);
1553 5 : }
1554 5 : void vpextrb(Operand dst, XMMRegister src, uint8_t imm8) {
1555 5 : vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
1556 : emit(imm8);
1557 5 : }
1558 5 : void vpextrw(Register dst, XMMRegister src, uint8_t imm8) {
1559 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1560 5 : vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
1561 : emit(imm8);
1562 5 : }
1563 5 : void vpextrw(Operand dst, XMMRegister src, uint8_t imm8) {
1564 5 : vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
1565 : emit(imm8);
1566 5 : }
1567 5 : void vpextrd(Register dst, XMMRegister src, uint8_t imm8) {
1568 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1569 5 : vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
1570 : emit(imm8);
1571 5 : }
1572 5 : void vpextrd(Operand dst, XMMRegister src, uint8_t imm8) {
1573 5 : vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
1574 : emit(imm8);
1575 5 : }
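// Note the operand order: the 0F 3A-encoded extracts (0x14/0x15/0x16) put
// the XMM source in the ModR/M reg field and the register/memory
// destination in r/m, while vpextrw to a register uses the legacy 0F C5
// form with the destination register in the reg field.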
1576 5 : void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1577 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1578 5 : vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
1579 : emit(imm8);
1580 5 : }
1581 5 : void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1582 5 : vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
1583 : emit(imm8);
1584 5 : }
1585 5 : void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1586 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1587 5 : vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
1588 : emit(imm8);
1589 5 : }
1590 5 : void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1591 5 : vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
1592 : emit(imm8);
1593 5 : }
1594 5 : void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1595 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1596 5 : vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
1597 : emit(imm8);
1598 5 : }
1599 5 : void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1600 5 : vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
1601 : emit(imm8);
1602 5 : }
1603 5 : void vpshufd(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1604 5 : vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
1605 : emit(imm8);
1606 5 : }
1607 :
1608 : void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1609 : void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1610 : void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1611 : void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1612 :
1613 : // BMI instructions
1614 : void andnq(Register dst, Register src1, Register src2) {
1615 10 : bmi1q(0xf2, dst, src1, src2);
1616 : }
1617 : void andnq(Register dst, Register src1, Operand src2) {
1618 10 : bmi1q(0xf2, dst, src1, src2);
1619 : }
1620 : void andnl(Register dst, Register src1, Register src2) {
1621 10 : bmi1l(0xf2, dst, src1, src2);
1622 : }
1623 : void andnl(Register dst, Register src1, Operand src2) {
1624 10 : bmi1l(0xf2, dst, src1, src2);
1625 : }
1626 : void bextrq(Register dst, Register src1, Register src2) {
1627 10 : bmi1q(0xf7, dst, src2, src1);
1628 : }
1629 : void bextrq(Register dst, Operand src1, Register src2) {
1630 10 : bmi1q(0xf7, dst, src2, src1);
1631 : }
1632 : void bextrl(Register dst, Register src1, Register src2) {
1633 10 : bmi1l(0xf7, dst, src2, src1);
1634 : }
1635 : void bextrl(Register dst, Operand src1, Register src2) {
1636 10 : bmi1l(0xf7, dst, src2, src1);
1637 : }
1638 10 : void blsiq(Register dst, Register src) { bmi1q(0xf3, rbx, dst, src); }
1639 10 : void blsiq(Register dst, Operand src) { bmi1q(0xf3, rbx, dst, src); }
1640 10 : void blsil(Register dst, Register src) { bmi1l(0xf3, rbx, dst, src); }
1641 10 : void blsil(Register dst, Operand src) { bmi1l(0xf3, rbx, dst, src); }
1642 10 : void blsmskq(Register dst, Register src) { bmi1q(0xf3, rdx, dst, src); }
1643 10 : void blsmskq(Register dst, Operand src) { bmi1q(0xf3, rdx, dst, src); }
1644 10 : void blsmskl(Register dst, Register src) { bmi1l(0xf3, rdx, dst, src); }
1645 10 : void blsmskl(Register dst, Operand src) { bmi1l(0xf3, rdx, dst, src); }
1646 10 : void blsrq(Register dst, Register src) { bmi1q(0xf3, rcx, dst, src); }
1647 10 : void blsrq(Register dst, Operand src) { bmi1q(0xf3, rcx, dst, src); }
1648 10 : void blsrl(Register dst, Register src) { bmi1l(0xf3, rcx, dst, src); }
1649 10 : void blsrl(Register dst, Operand src) { bmi1l(0xf3, rcx, dst, src); }
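// blsi, blsmsk and blsr share opcode 0xf3 and are distinguished by an
// opcode extension in the ModR/M reg field (/3, /2 and /1 respectively);
// passing rbx (code 3), rdx (code 2) or rcx (code 1) as the reg operand of
// bmi1q/bmi1l encodes that extension.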
1650 : void tzcntq(Register dst, Register src);
1651 : void tzcntq(Register dst, Operand src);
1652 : void tzcntl(Register dst, Register src);
1653 : void tzcntl(Register dst, Operand src);
1654 :
1655 : void lzcntq(Register dst, Register src);
1656 : void lzcntq(Register dst, Operand src);
1657 : void lzcntl(Register dst, Register src);
1658 : void lzcntl(Register dst, Operand src);
1659 :
1660 : void popcntq(Register dst, Register src);
1661 : void popcntq(Register dst, Operand src);
1662 : void popcntl(Register dst, Register src);
1663 : void popcntl(Register dst, Operand src);
1664 :
1665 : void bzhiq(Register dst, Register src1, Register src2) {
1666 10 : bmi2q(kNone, 0xf5, dst, src2, src1);
1667 : }
1668 : void bzhiq(Register dst, Operand src1, Register src2) {
1669 10 : bmi2q(kNone, 0xf5, dst, src2, src1);
1670 : }
1671 : void bzhil(Register dst, Register src1, Register src2) {
1672 10 : bmi2l(kNone, 0xf5, dst, src2, src1);
1673 : }
1674 : void bzhil(Register dst, Operand src1, Register src2) {
1675 10 : bmi2l(kNone, 0xf5, dst, src2, src1);
1676 : }
1677 : void mulxq(Register dst1, Register dst2, Register src) {
1678 10 : bmi2q(kF2, 0xf6, dst1, dst2, src);
1679 : }
1680 : void mulxq(Register dst1, Register dst2, Operand src) {
1681 10 : bmi2q(kF2, 0xf6, dst1, dst2, src);
1682 : }
1683 : void mulxl(Register dst1, Register dst2, Register src) {
1684 10 : bmi2l(kF2, 0xf6, dst1, dst2, src);
1685 : }
1686 : void mulxl(Register dst1, Register dst2, Operand src) {
1687 10 : bmi2l(kF2, 0xf6, dst1, dst2, src);
1688 : }
1689 : void pdepq(Register dst, Register src1, Register src2) {
1690 10 : bmi2q(kF2, 0xf5, dst, src1, src2);
1691 : }
1692 : void pdepq(Register dst, Register src1, Operand src2) {
1693 10 : bmi2q(kF2, 0xf5, dst, src1, src2);
1694 : }
1695 : void pdepl(Register dst, Register src1, Register src2) {
1696 10 : bmi2l(kF2, 0xf5, dst, src1, src2);
1697 : }
1698 : void pdepl(Register dst, Register src1, Operand src2) {
1699 10 : bmi2l(kF2, 0xf5, dst, src1, src2);
1700 : }
1701 : void pextq(Register dst, Register src1, Register src2) {
1702 10 : bmi2q(kF3, 0xf5, dst, src1, src2);
1703 : }
1704 : void pextq(Register dst, Register src1, Operand src2) {
1705 10 : bmi2q(kF3, 0xf5, dst, src1, src2);
1706 : }
1707 : void pextl(Register dst, Register src1, Register src2) {
1708 10 : bmi2l(kF3, 0xf5, dst, src1, src2);
1709 : }
1710 : void pextl(Register dst, Register src1, Operand src2) {
1711 10 : bmi2l(kF3, 0xf5, dst, src1, src2);
1712 : }
1713 : void sarxq(Register dst, Register src1, Register src2) {
1714 10 : bmi2q(kF3, 0xf7, dst, src2, src1);
1715 : }
1716 : void sarxq(Register dst, Operand src1, Register src2) {
1717 10 : bmi2q(kF3, 0xf7, dst, src2, src1);
1718 : }
1719 : void sarxl(Register dst, Register src1, Register src2) {
1720 10 : bmi2l(kF3, 0xf7, dst, src2, src1);
1721 : }
1722 : void sarxl(Register dst, Operand src1, Register src2) {
1723 10 : bmi2l(kF3, 0xf7, dst, src2, src1);
1724 : }
1725 : void shlxq(Register dst, Register src1, Register src2) {
1726 10 : bmi2q(k66, 0xf7, dst, src2, src1);
1727 : }
1728 : void shlxq(Register dst, Operand src1, Register src2) {
1729 10 : bmi2q(k66, 0xf7, dst, src2, src1);
1730 : }
1731 : void shlxl(Register dst, Register src1, Register src2) {
1732 10 : bmi2l(k66, 0xf7, dst, src2, src1);
1733 : }
1734 : void shlxl(Register dst, Operand src1, Register src2) {
1735 10 : bmi2l(k66, 0xf7, dst, src2, src1);
1736 : }
1737 : void shrxq(Register dst, Register src1, Register src2) {
1738 10 : bmi2q(kF2, 0xf7, dst, src2, src1);
1739 : }
1740 : void shrxq(Register dst, Operand src1, Register src2) {
1741 10 : bmi2q(kF2, 0xf7, dst, src2, src1);
1742 : }
1743 : void shrxl(Register dst, Register src1, Register src2) {
1744 10 : bmi2l(kF2, 0xf7, dst, src2, src1);
1745 : }
1746 : void shrxl(Register dst, Operand src1, Register src2) {
1747 10 : bmi2l(kF2, 0xf7, dst, src2, src1);
1748 : }
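// For these VEX-encoded BMI2 helpers the SIMD prefix selects the
// instruction within an opcode: with 0xf7, k66 = shlx, kF3 = sarx and
// kF2 = shrx (kNone would be bextr); with 0xf5, kNone = bzhi, kF3 = pext
// and kF2 = pdep; with 0xf6, kF2 = mulx.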
1749 : void rorxq(Register dst, Register src, byte imm8);
1750 : void rorxq(Register dst, Operand src, byte imm8);
1751 : void rorxl(Register dst, Register src, byte imm8);
1752 : void rorxl(Register dst, Operand src, byte imm8);
1753 :
1754 : void lfence();
1755 : void pause();
1756 :
1757 : // Returns the size of the code generated from label to the current position.
1758 : int SizeOfCodeGeneratedSince(Label* label) {
1759 : return pc_offset() - label->pos();
1760 : }
1761 :
1762 : // Record a deoptimization reason that can be used by a log or cpu profiler.
1763 : // Use --trace-deopt to enable.
1764 : void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
1765 : int id);
1766 :
1767 :
1768 : // Writes a single byte, 32-bit word, or 64-bit word of data in the
1769 : // code stream. Used for inline tables, e.g., jump-tables.
1770 : void db(uint8_t data);
1771 : void dd(uint32_t data);
1772 : void dq(uint64_t data);
1773 : void dp(uintptr_t data) { dq(data); }
1774 : void dq(Label* label);
1775 :
1776 : // Patch entries for partial constant pool.
1777 : void PatchConstPool();
1778 :
1779 : // Check if use partial constant pool for this rmode.
1780 : // Check whether the partial constant pool should be used for this rmode.
1781 :
1782 : // Check if there is less than kGap bytes available in the buffer.
1783 : // Check if there are fewer than kGap bytes available in the buffer.
1784 : // an instruction or relocation information.
1785 : inline bool buffer_overflow() const {
1786 249438256 : return pc_ >= reloc_info_writer.pos() - kGap;
1787 : }
1788 :
1789 : // Get the number of bytes available in the buffer.
1790 : inline int available_space() const {
1791 : return static_cast<int>(reloc_info_writer.pos() - pc_);
1792 : }
1793 :
1794 : static bool IsNop(Address addr);
1795 :
1796 : // Avoid overflows for displacements etc.
1797 : static constexpr int kMaximalBufferSize = 512 * MB;
1798 :
1799 : byte byte_at(int pos) { return buffer_start_[pos]; }
1800 1793452 : void set_byte_at(int pos, byte value) { buffer_start_[pos] = value; }
1801 :
1802 : protected:
1803 : // Call near indirect
1804 : void call(Operand operand);
1805 :
1806 : private:
1807 63672331 : byte* addr_at(int pos) { return buffer_start_ + pos; }
1808 39667775 : uint32_t long_at(int pos) {
1809 39667775 : return *reinterpret_cast<uint32_t*>(addr_at(pos));
1810 : }
1811 18615003 : void long_at_put(int pos, uint32_t x) {
1812 19210577 : *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
1813 : }
1814 :
1815 : // code emission
1816 : void GrowBuffer();
1817 :
1818 406217890 : void emit(byte x) { *pc_++ = x; }
1819 : inline void emitl(uint32_t x);
1820 : inline void emitp(Address x, RelocInfo::Mode rmode);
1821 : inline void emitq(uint64_t x);
1822 : inline void emitw(uint16_t x);
1823 : inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
1824 : inline void emit(Immediate x);
1825 :
1826 : // Emits a REX prefix that encodes a 64-bit operand size and
1827 : // the top bit of both register codes.
1828 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1829 : // REX.W is set.
1830 : inline void emit_rex_64(XMMRegister reg, Register rm_reg);
1831 : inline void emit_rex_64(Register reg, XMMRegister rm_reg);
1832 : inline void emit_rex_64(Register reg, Register rm_reg);
1833 : inline void emit_rex_64(XMMRegister reg, XMMRegister rm_reg);
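// A REX prefix is a single byte of the form 0b0100WRXB (0x40..0x4F). A
// minimal sketch of the two-register case, assuming Register::high_bit()
// returns the top bit of the four-bit register code:
//   void Assembler::emit_rex_64(Register reg, Register rm_reg) {
//     emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());  // W, R, B
//   }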
1834 :
1835 : // Emits a REX prefix that encodes a 64-bit operand size and
1836 : // the top bit of the destination, index, and base register codes.
1837 : // The high bit of reg is used for REX.R, the high bit of op's base
1838 : // register is used for REX.B, and the high bit of op's index register
1839 : // is used for REX.X. REX.W is set.
1840 : inline void emit_rex_64(Register reg, Operand op);
1841 : inline void emit_rex_64(XMMRegister reg, Operand op);
1842 :
1843 : // Emits a REX prefix that encodes a 64-bit operand size and
1844 : // the top bit of the register code.
1845 : // The high bit of register is used for REX.B.
1846 : // REX.W is set and REX.R and REX.X are clear.
1847 : inline void emit_rex_64(Register rm_reg);
1848 :
1849 : // Emits a REX prefix that encodes a 64-bit operand size and
1850 : // the top bit of the index and base register codes.
1851 : // The high bit of op's base register is used for REX.B, and the high
1852 : // bit of op's index register is used for REX.X.
1853 : // REX.W is set and REX.R clear.
1854 : inline void emit_rex_64(Operand op);
1855 :
1856 : // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
1857 : void emit_rex_64() { emit(0x48); }
1858 :
1859 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1860 : // REX.W is clear.
1861 : inline void emit_rex_32(Register reg, Register rm_reg);
1862 :
1863 : // The high bit of reg is used for REX.R, the high bit of op's base
1864 : // register is used for REX.B, and the high bit of op's index register
1865 : // is used for REX.X. REX.W is cleared.
1866 : inline void emit_rex_32(Register reg, Operand op);
1867 :
1868 : // High bit of rm_reg goes to REX.B.
1869 : // REX.W, REX.R and REX.X are clear.
1870 : inline void emit_rex_32(Register rm_reg);
1871 :
1872 : // High bit of base goes to REX.B and high bit of index to REX.X.
1873 : // REX.W and REX.R are clear.
1874 : inline void emit_rex_32(Operand op);
1875 :
1876 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1877 : // REX.W is cleared. If no REX bits are set, no byte is emitted.
1878 : inline void emit_optional_rex_32(Register reg, Register rm_reg);
1879 :
1880 : // The high bit of reg is used for REX.R, the high bit of op's base
1881 : // register is used for REX.B, and the high bit of op's index register
1882 : // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
1883 : // is emitted.
1884 : inline void emit_optional_rex_32(Register reg, Operand op);
1885 :
1886 : // As for emit_optional_rex_32(Register, Register), except that
1887 : // the registers are XMM registers.
1888 : inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
1889 :
1890 : // As for emit_optional_rex_32(Register, Register), except that
1891 : // one of the registers is an XMM register.
1892 : inline void emit_optional_rex_32(XMMRegister reg, Register base);
1893 :
1894 : // As for emit_optional_rex_32(Register, Register), except that
1895 : // one of the registers is an XMM register.
1896 : inline void emit_optional_rex_32(Register reg, XMMRegister base);
1897 :
1898 : // As for emit_optional_rex_32(Register, Operand), except that
1899 : // the register is an XMM register.
1900 : inline void emit_optional_rex_32(XMMRegister reg, Operand op);
1901 :
1902 : // As emit_rex_32(Register), but only emitted if the register number
1903 : // has the high bit set.
1904 : inline void emit_optional_rex_32(Register rm_reg);
1905 : inline void emit_optional_rex_32(XMMRegister rm_reg);
1906 :
1907 : // As emit_rex_32(Operand), but only emitted if one of the operand's
1908 : // register numbers has the high bit set.
1909 : inline void emit_optional_rex_32(Operand op);
1910 :
1911 : void emit_rex(int size) {
1912 0 : if (size == kInt64Size) {
1913 : emit_rex_64();
1914 : } else {
1915 : DCHECK_EQ(size, kInt32Size);
1916 : }
1917 : }
1918 :
1919 : template<class P1>
1920 : void emit_rex(P1 p1, int size) {
1921 23570208 : if (size == kInt64Size) {
1922 : emit_rex_64(p1);
1923 : } else {
1924 : DCHECK_EQ(size, kInt32Size);
1925 : emit_optional_rex_32(p1);
1926 : }
1927 : }
1928 :
1929 : template<class P1, class P2>
1930 71335326 : void emit_rex(P1 p1, P2 p2, int size) {
1931 71335326 : if (size == kInt64Size) {
1932 : emit_rex_64(p1, p2);
1933 : } else {
1934 : DCHECK_EQ(size, kInt32Size);
1935 : emit_optional_rex_32(p1, p2);
1936 : }
1937 71335326 : }
1938 :
1939 : // Emit vex prefix
1940 : void emit_vex2_byte0() { emit(0xc5); }
1941 : inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
1942 : SIMDPrefix pp);
1943 : void emit_vex3_byte0() { emit(0xc4); }
1944 : inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
1945 : inline void emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m);
1946 : inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
1947 : SIMDPrefix pp);
1948 : inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
1949 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1950 : VexW w);
1951 : inline void emit_vex_prefix(Register reg, Register v, Register rm,
1952 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1953 : VexW w);
1954 : inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, Operand rm,
1955 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1956 : VexW w);
1957 : inline void emit_vex_prefix(Register reg, Register v, Operand rm,
1958 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1959 : VexW w);
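// The two-byte VEX prefix is 0xC5 followed by [~R | ~vvvv | L | pp]; the
// three-byte form is 0xC4 followed by [~R ~X ~B | mmmmm] and
// [W | ~vvvv | L | pp]. The short form can only be used when X, B and W
// are zero and the leading opcode map is 0F.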
1960 :
1961 : // Emit the ModR/M byte, and optionally the SIB byte and
1962 : // 1- or 4-byte offset for a memory operand. Also encodes
1963 : // the second operand of the operation, a register or operation
1964 : // subcode, into the reg field of the ModR/M byte.
1965 : void emit_operand(Register reg, Operand adr) {
1966 53816868 : emit_operand(reg.low_bits(), adr);
1967 : }
1968 :
1969 : // Emit the ModR/M byte, and optionally the SIB byte and
1970 : // 1- or 4-byte offset for a memory operand. Also used to encode
1971 : // a three-bit opcode extension into the ModR/M byte.
1972 : void emit_operand(int rm, Operand adr);
1973 :
1974 : // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
1975 : void emit_modrm(Register reg, Register rm_reg) {
1976 32533569 : emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
1977 : }
1978 :
1979 : // Emit a ModR/M byte with an operation subcode in the reg field and
1980 : // a register in the rm_reg field.
1981 : void emit_modrm(int code, Register rm_reg) {
1982 : DCHECK(is_uint3(code));
1983 40399063 : emit(0xC0 | code << 3 | rm_reg.low_bits());
1984 : }
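// Worked example: "add rax, rbx" (64-bit) is emitted as REX.W (0x48), the
// opcode 0x03, and emit_modrm(rax, rbx) = 0xC0 | 0 << 3 | 3 = 0xC3, giving
// the byte sequence 48 03 C3.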
1985 :
1986 : // Emit the code-object-relative offset of the label's position
1987 : inline void emit_code_relative_offset(Label* label);
1988 :
1989 : // The first argument is the reg field, the second argument is the r/m field.
1990 : void emit_sse_operand(XMMRegister dst, XMMRegister src);
1991 : void emit_sse_operand(XMMRegister reg, Operand adr);
1992 : void emit_sse_operand(Register reg, Operand adr);
1993 : void emit_sse_operand(XMMRegister dst, Register src);
1994 : void emit_sse_operand(Register dst, XMMRegister src);
1995 : void emit_sse_operand(XMMRegister dst);
1996 :
1997 : // Emit machine code for one of the operations ADD, ADC, SUB, SBB,
1998 : // AND, OR, XOR, or CMP. The encodings of these operations are all
1999 : // similar, differing just in the opcode or in the reg field of the
2000 : // ModR/M byte.
2001 : void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
2002 : void arithmetic_op_8(byte opcode, Register reg, Operand rm_reg);
2003 : void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
2004 : void arithmetic_op_16(byte opcode, Register reg, Operand rm_reg);
2005 : // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
2006 : void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
2007 : void arithmetic_op(byte opcode, Register reg, Operand rm_reg, int size);
2008 : // Operate on a byte in memory or register.
2009 : void immediate_arithmetic_op_8(byte subcode,
2010 : Register dst,
2011 : Immediate src);
2012 : void immediate_arithmetic_op_8(byte subcode, Operand dst, Immediate src);
2013 : // Operate on a word in memory or register.
2014 : void immediate_arithmetic_op_16(byte subcode,
2015 : Register dst,
2016 : Immediate src);
2017 : void immediate_arithmetic_op_16(byte subcode, Operand dst, Immediate src);
2018 : // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
2019 : void immediate_arithmetic_op(byte subcode,
2020 : Register dst,
2021 : Immediate src,
2022 : int size);
2023 : void immediate_arithmetic_op(byte subcode, Operand dst, Immediate src,
2024 : int size);
2025 :
2026 : // Emit machine code for a shift operation.
2027 : void shift(Operand dst, Immediate shift_amount, int subcode, int size);
2028 : void shift(Register dst, Immediate shift_amount, int subcode, int size);
2029 : // Shift dst by cl % 64 bits (cl % 32 when size is 32).
2030 : void shift(Register dst, int subcode, int size);
2031 : void shift(Operand dst, int subcode, int size);
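// The shift subcode is the /digit opcode extension of the x86 shift group:
// /0 = rol, /1 = ror, /2 = rcl, /3 = rcr, /4 = shl, /5 = shr, /7 = sar.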
2032 :
2033 : void emit_farith(int b1, int b2, int i);
2034 :
2035 : // labels
2036 : // void print(Label* L);
2037 : void bind_to(Label* L, int pos);
2038 :
2039 : // record reloc info for current pc_
2040 : void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
2041 :
2042 : // Arithmetic operations
2043 : void emit_add(Register dst, Register src, int size) {
2044 1163268 : arithmetic_op(0x03, dst, src, size);
2045 : }
2046 :
2047 : void emit_add(Register dst, Immediate src, int size) {
2048 2240071 : immediate_arithmetic_op(0x0, dst, src, size);
2049 : }
2050 :
2051 : void emit_add(Register dst, Operand src, int size) {
2052 26893 : arithmetic_op(0x03, dst, src, size);
2053 : }
2054 :
2055 : void emit_add(Operand dst, Register src, int size) {
2056 170601 : arithmetic_op(0x1, src, dst, size);
2057 : }
2058 :
2059 : void emit_add(Operand dst, Immediate src, int size) {
2060 7922 : immediate_arithmetic_op(0x0, dst, src, size);
2061 : }
2062 :
2063 : void emit_and(Register dst, Register src, int size) {
2064 822447 : arithmetic_op(0x23, dst, src, size);
2065 : }
2066 :
2067 : void emit_and(Register dst, Operand src, int size) {
2068 4157 : arithmetic_op(0x23, dst, src, size);
2069 : }
2070 :
2071 : void emit_and(Operand dst, Register src, int size) {
2072 : arithmetic_op(0x21, src, dst, size);
2073 : }
2074 :
2075 : void emit_and(Register dst, Immediate src, int size) {
2076 1450248 : immediate_arithmetic_op(0x4, dst, src, size);
2077 : }
2078 :
2079 : void emit_and(Operand dst, Immediate src, int size) {
2080 0 : immediate_arithmetic_op(0x4, dst, src, size);
2081 : }
2082 :
2083 : void emit_cmp(Register dst, Register src, int size) {
2084 1994151 : arithmetic_op(0x3B, dst, src, size);
2085 : }
2086 :
2087 : void emit_cmp(Register dst, Operand src, int size) {
2088 900549 : arithmetic_op(0x3B, dst, src, size);
2089 : }
2090 :
2091 : void emit_cmp(Operand dst, Register src, int size) {
2092 1521059 : arithmetic_op(0x39, src, dst, size);
2093 : }
2094 :
2095 : void emit_cmp(Register dst, Immediate src, int size) {
2096 3355204 : immediate_arithmetic_op(0x7, dst, src, size);
2097 : }
2098 :
2099 : void emit_cmp(Operand dst, Immediate src, int size) {
2100 194870 : immediate_arithmetic_op(0x7, dst, src, size);
2101 : }
2102 :
2103 : // Compare {al,ax,eax,rax} with dst. If equal, set ZF and write src into
2104 : // dst. Otherwise clear ZF and write dst into {al,ax,eax,rax}. This
2105 : // operation is only atomic if prefixed by the lock instruction.
2106 : void emit_cmpxchg(Operand dst, Register src, int size);
2107 :
2108 : void emit_dec(Register dst, int size);
2109 : void emit_dec(Operand dst, int size);
2110 :
2111 : // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
2112 : // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
2113 : // when size is 32.
2114 : void emit_idiv(Register src, int size);
2115 : void emit_div(Register src, int size);
2116 :
2117 : // Signed multiply instructions.
2118 : // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
2119 : void emit_imul(Register src, int size);
2120 : void emit_imul(Operand src, int size);
2121 : void emit_imul(Register dst, Register src, int size);
2122 : void emit_imul(Register dst, Operand src, int size);
2123 : void emit_imul(Register dst, Register src, Immediate imm, int size);
2124 : void emit_imul(Register dst, Operand src, Immediate imm, int size);
2125 :
2126 : void emit_inc(Register dst, int size);
2127 : void emit_inc(Operand dst, int size);
2128 :
2129 : void emit_lea(Register dst, Operand src, int size);
2130 :
2131 : void emit_mov(Register dst, Operand src, int size);
2132 : void emit_mov(Register dst, Register src, int size);
2133 : void emit_mov(Operand dst, Register src, int size);
2134 : void emit_mov(Register dst, Immediate value, int size);
2135 : void emit_mov(Operand dst, Immediate value, int size);
2136 :
2137 : void emit_movzxb(Register dst, Operand src, int size);
2138 : void emit_movzxb(Register dst, Register src, int size);
2139 : void emit_movzxw(Register dst, Operand src, int size);
2140 : void emit_movzxw(Register dst, Register src, int size);
2141 :
2142 : void emit_neg(Register dst, int size);
2143 : void emit_neg(Operand dst, int size);
2144 :
2145 : void emit_not(Register dst, int size);
2146 : void emit_not(Operand dst, int size);
2147 :
2148 : void emit_or(Register dst, Register src, int size) {
2149 171139 : arithmetic_op(0x0B, dst, src, size);
2150 : }
2151 :
2152 : void emit_or(Register dst, Operand src, int size) {
2153 7400 : arithmetic_op(0x0B, dst, src, size);
2154 : }
2155 :
2156 : void emit_or(Operand dst, Register src, int size) {
2157 5 : arithmetic_op(0x9, src, dst, size);
2158 : }
2159 :
2160 : void emit_or(Register dst, Immediate src, int size) {
2161 51261 : immediate_arithmetic_op(0x1, dst, src, size);
2162 : }
2163 :
2164 : void emit_or(Operand dst, Immediate src, int size) {
2165 0 : immediate_arithmetic_op(0x1, dst, src, size);
2166 : }
2167 :
2168 : void emit_repmovs(int size);
2169 :
2170 : void emit_sbb(Register dst, Register src, int size) {
2171 5 : arithmetic_op(0x1b, dst, src, size);
2172 : }
2173 :
2174 : void emit_sub(Register dst, Register src, int size) {
2175 232606 : arithmetic_op(0x2B, dst, src, size);
2176 : }
2177 :
2178 : void emit_sub(Register dst, Immediate src, int size) {
2179 4329044 : immediate_arithmetic_op(0x5, dst, src, size);
2180 : }
2181 :
2182 : void emit_sub(Register dst, Operand src, int size) {
2183 179158 : arithmetic_op(0x2B, dst, src, size);
2184 : }
2185 :
2186 : void emit_sub(Operand dst, Register src, int size) {
2187 170606 : arithmetic_op(0x29, src, dst, size);
2188 : }
2189 :
2190 : void emit_sub(Operand dst, Immediate src, int size) {
2191 117 : immediate_arithmetic_op(0x5, dst, src, size);
2192 : }
2193 :
2194 : void emit_test(Register dst, Register src, int size);
2195 : void emit_test(Register reg, Immediate mask, int size);
2196 : void emit_test(Operand op, Register reg, int size);
2197 : void emit_test(Operand op, Immediate mask, int size);
2198 : void emit_test(Register reg, Operand op, int size) {
2199 364 : return emit_test(op, reg, size);
2200 : }
2201 :
2202 : void emit_xchg(Register dst, Register src, int size);
2203 : void emit_xchg(Register dst, Operand src, int size);
2204 :
2205 2752000 : void emit_xor(Register dst, Register src, int size) {
2206 2760986 : if (size == kInt64Size && dst.code() == src.code()) {
2207 : // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
2208 : // there is no need to make this a 64 bit operation.
2209 6348 : arithmetic_op(0x33, dst, src, kInt32Size);
2210 : } else {
2211 2745652 : arithmetic_op(0x33, dst, src, size);
2212 : }
2213 2752013 : }
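// For example, emit_xor(rax, rax, kInt64Size) takes the 32-bit path and
// emits 33 C0 ("xor eax, eax"), which already clears all 64 bits of rax,
// instead of the longer 48 33 C0.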
2214 :
2215 : void emit_xor(Register dst, Operand src, int size) {
2216 654 : arithmetic_op(0x33, dst, src, size);
2217 : }
2218 :
2219 : void emit_xor(Register dst, Immediate src, int size) {
2220 44173 : immediate_arithmetic_op(0x6, dst, src, size);
2221 : }
2222 :
2223 : void emit_xor(Operand dst, Immediate src, int size) {
2224 0 : immediate_arithmetic_op(0x6, dst, src, size);
2225 : }
2226 :
2227 : void emit_xor(Operand dst, Register src, int size) {
2228 5 : arithmetic_op(0x31, src, dst, size);
2229 : }
2230 :
2231 : // Most BMI instructions share a common VEX encoding; these helpers emit it.
2232 : void bmi1q(byte op, Register reg, Register vreg, Register rm);
2233 : void bmi1q(byte op, Register reg, Register vreg, Operand rm);
2234 : void bmi1l(byte op, Register reg, Register vreg, Register rm);
2235 : void bmi1l(byte op, Register reg, Register vreg, Operand rm);
2236 : void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
2237 : void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
2238 : void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
2239 : void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
2240 :
2241 : // record the position of jmp/jcc instruction
2242 : // record the position of a jmp/jcc instruction
2243 :
2244 : bool is_optimizable_farjmp(int idx);
2245 :
2246 : void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
2247 :
2248 : int WriteCodeComments();
2249 :
2250 : friend class EnsureSpace;
2251 : friend class RegExpMacroAssemblerX64;
2252 :
2253 : // code generation
2254 : RelocInfoWriter reloc_info_writer;
2255 :
2256 : // Internal reference positions, required for (potential) patching in
2257 : // GrowBuffer(); contains only those internal references whose labels
2258 : // are already bound.
2259 : std::deque<int> internal_reference_positions_;
2260 :
2261 : // Variables for this instance of assembler
2262 : int farjmp_num_ = 0;
2263 : std::deque<int> farjmp_positions_;
2264 : std::map<Label*, std::vector<int>> label_farjmp_maps_;
2265 :
2266 : ConstPool constpool_;
2267 :
2268 : friend class ConstPool;
2269 : };
2270 :
2271 :
2272 : // Helper class that ensures that there is enough space for generating
2273 : // instructions and relocation information. The constructor makes
2274 : // sure that there is enough space and (in debug mode) the destructor
2275 : // checks that we did not generate too much.
2276 : class EnsureSpace {
2277 : public:
2278 : explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
2279 249438256 : if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
2280 : #ifdef DEBUG
2281 : space_before_ = assembler_->available_space();
2282 : #endif
2283 : }
2284 :
2285 : #ifdef DEBUG
2286 : ~EnsureSpace() {
2287 : int bytes_generated = space_before_ - assembler_->available_space();
2288 : DCHECK(bytes_generated < assembler_->kGap);
2289 : }
2290 : #endif
2291 :
2292 : private:
2293 : Assembler* assembler_;
2294 : #ifdef DEBUG
2295 : int space_before_;
2296 : #endif
2297 : };
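// Typical use inside an emitter (illustrative sketch only; real emitters
// do more work, e.g. encode operands and record relocation info):
//   void Assembler::int3() {
//     EnsureSpace ensure_space(this);
//     emit(0xCC);
//   }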
2298 :
2299 : } // namespace internal
2300 : } // namespace v8
2301 :
2302 : #endif // V8_X64_ASSEMBLER_X64_H_