Line data Source code
1 : // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 : // All Rights Reserved.
3 : //
4 : // Redistribution and use in source and binary forms, with or without
5 : // modification, are permitted provided that the following conditions are
6 : // met:
7 : //
8 : // - Redistributions of source code must retain the above copyright notice,
9 : // this list of conditions and the following disclaimer.
10 : //
11 : // - Redistribution in binary form must reproduce the above copyright
12 : // notice, this list of conditions and the following disclaimer in the
13 : // documentation and/or other materials provided with the distribution.
14 : //
15 : // - Neither the name of Sun Microsystems or the names of contributors may
16 : // be used to endorse or promote products derived from this software without
17 : // specific prior written permission.
18 : //
19 : // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 : // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 : // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 : // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 : // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 : // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 : // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 : // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 : // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 : // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 : // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 :
// The original source code covered by the above license has been
32 : // modified significantly by Google Inc.
33 : // Copyright 2012 the V8 project authors. All rights reserved.
34 :
35 : // A lightweight X64 Assembler.
36 :
37 : #ifndef V8_X64_ASSEMBLER_X64_H_
38 : #define V8_X64_ASSEMBLER_X64_H_
39 :
40 : #include <deque>
41 : #include <map>
42 : #include <vector>
43 :
44 : #include "src/assembler.h"
45 : #include "src/label.h"
46 : #include "src/objects/smi.h"
47 : #include "src/x64/constants-x64.h"
48 : #include "src/x64/register-x64.h"
49 : #include "src/x64/sse-instr.h"
50 : #if defined(V8_OS_WIN_X64)
51 : #include "src/unwinding-info-win64.h"
52 : #endif
53 :
54 : namespace v8 {
55 : namespace internal {
56 :
57 : class SafepointTableBuilder;
58 :
59 : // Utility functions
60 :
// Condition codes for conditional instructions (jcc, cmovcc, setcc).
// The numeric values are significant: they are OR'ed directly into opcode
// bytes (see the kJccShortPrefix constants below), and each condition's
// negation is its value with the lowest bit flipped (see NegateCondition).
enum Condition {
  // any value < 0 is considered no_condition
  no_condition = -1,

  overflow = 0,
  no_overflow = 1,
  below = 2,
  above_equal = 3,
  equal = 4,
  not_equal = 5,
  below_equal = 6,
  above = 7,
  negative = 8,
  positive = 9,
  parity_even = 10,
  parity_odd = 11,
  less = 12,
  greater_equal = 13,
  less_equal = 14,
  greater = 15,

  // Fake conditions that are handled by the
  // opcodes using them.
  always = 16,
  never = 17,
  // Aliases for the flag-based names of the same encodings.
  carry = below,
  not_carry = above_equal,
  zero = equal,
  not_zero = not_equal,
  sign = negative,
  not_sign = positive,
  last_condition = greater
};
95 :
96 :
97 : // Returns the equivalent of !cc.
98 : // Negation of the default no_condition (-1) results in a non-default
99 : // no_condition value (-2). As long as tests for no_condition check
100 : // for condition < 0, this will work as expected.
101 : inline Condition NegateCondition(Condition cc) {
102 464172 : return static_cast<Condition>(cc ^ 1);
103 : }
104 :
105 :
// Rounding modes for rounding instructions.
// NOTE(review): the values appear to match the 2-bit rounding-control
// immediate field of the SSE4.1 round* instructions — confirm against the
// emitters that consume this enum before relying on that.
enum RoundingMode {
  kRoundToNearest = 0x0,
  kRoundDown = 0x1,
  kRoundUp = 0x2,
  kRoundToZero = 0x3
};
112 :
113 :
114 : // -----------------------------------------------------------------------------
115 : // Machine instruction Immediates
116 :
// A 32-bit immediate instruction operand, optionally carrying relocation
// information. Immutable once constructed; only the Assembler reads it.
class Immediate {
 public:
  // Plain 32-bit immediate with no relocation (rmode_ stays RelocInfo::NONE).
  explicit constexpr Immediate(int32_t value) : value_(value) {}
  // Immediate whose value needs relocation; rmode describes how.
  explicit constexpr Immediate(int32_t value, RelocInfo::Mode rmode)
      : value_(value), rmode_(rmode) {}
  // Immediate holding a tagged Smi value. The full tagged value must fit in
  // 32 bits, hence the 31-bit-SMI check below.
  explicit Immediate(Smi value)
      : value_(static_cast<int32_t>(static_cast<intptr_t>(value.ptr()))) {
    DCHECK(SmiValuesAre31Bits());  // Only available for 31-bit SMI.
  }

 private:
  const int32_t value_;
  const RelocInfo::Mode rmode_ = RelocInfo::NONE;

  // The Assembler accesses value_/rmode_ directly when emitting code.
  friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Immediate);
static_assert(sizeof(Immediate) <= kSystemPointerSize,
              "Immediate must be small enough to pass it by value");
136 :
// A 64-bit immediate instruction operand, optionally carrying relocation
// information. Used e.g. for 'movq reg, imm64'. Unlike Immediate, no
// pass-by-value size assertion is made for this type.
class Immediate64 {
 public:
  // Plain 64-bit immediate with no relocation (rmode_ stays RelocInfo::NONE).
  explicit constexpr Immediate64(int64_t value) : value_(value) {}
  // 64-bit immediate whose value needs relocation; rmode describes how.
  explicit constexpr Immediate64(int64_t value, RelocInfo::Mode rmode)
      : value_(value), rmode_(rmode) {}
  // Address-valued immediate (e.g. an embedded pointer), with relocation.
  explicit constexpr Immediate64(Address value, RelocInfo::Mode rmode)
      : value_(static_cast<int64_t>(value)), rmode_(rmode) {}

 private:
  const int64_t value_;
  const RelocInfo::Mode rmode_ = RelocInfo::NONE;

  // The Assembler accesses value_/rmode_ directly when emitting code.
  friend class Assembler;
};
151 :
152 : // -----------------------------------------------------------------------------
153 : // Machine instruction Operands
154 :
// Scale factors for the index register of a memory operand. The enumerator
// value is the power-of-two shift applied to the index (scale = 1 << value).
enum ScaleFactor : int8_t {
  times_1 = 0,
  times_2 = 1,
  times_4 = 2,
  times_8 = 3,
  times_int_size = times_4,
  // Pointer- and tagged-size scales depend on the build configuration
  // (8-byte system pointers, and tagged size under pointer compression).
  times_system_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
  times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4,
};
164 :
// A memory operand of an instruction. The ModR/M, SIB and displacement
// bytes are pre-encoded into `data_` at construction time, so emitting the
// operand is a plain byte copy.
class V8_EXPORT_PRIVATE Operand {
 public:
  // Pre-encoded operand bytes.
  struct Data {
    byte rex = 0;   // REX prefix bits required by the base/index registers.
    byte buf[9];    // ModR/M, SIB and displacement parts (see operand_size()).
    byte len = 1;   // number of bytes of buf_ in use.
    int8_t addend;  // for rip + offset + addend.
  };

  // [base + disp/r]
  Operand(Register base, int32_t disp);

  // [base + index*scale + disp/r]
  Operand(Register base,
          Register index,
          ScaleFactor scale,
          int32_t disp);

  // [index*scale + disp/r]
  Operand(Register index,
          ScaleFactor scale,
          int32_t disp);

  // Offset from existing memory operand.
  // Offset is added to existing displacement as 32-bit signed values and
  // this must not overflow.
  Operand(Operand base, int32_t offset);

  // [rip + disp/r]
  explicit Operand(Label* label, int addend = 0);

  Operand(const Operand&) V8_NOEXCEPT = default;

  // Checks whether either base or index register is the given register.
  // Does not check the "reg" part of the Operand.
  bool AddressUsesRegister(Register reg) const;

  // Queries related to the size of the generated instruction.
  // Whether the generated instruction will have a REX prefix.
  bool requires_rex() const { return data_.rex != 0; }
  // Size of the ModR/M, SIB and displacement parts of the generated
  // instruction.
  int operand_size() const { return data_.len; }

  const Data& data() const { return data_; }

 private:
  const Data data_;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
              "Operand must be small enough to pass it by value");
217 :
// Instructions that come in width-suffixed variants (l/q/_tagged). Expanded
// by DECLARE_INSTRUCTION inside the Assembler class below, which generates
// the public wrappers delegating to the private emit_* implementations.
#define ASSEMBLER_INSTRUCTION_LIST(V) \
  V(add)                              \
  V(and)                              \
  V(cmp)                              \
  V(cmpxchg)                          \
  V(dec)                              \
  V(idiv)                             \
  V(div)                              \
  V(imul)                             \
  V(inc)                              \
  V(lea)                              \
  V(mov)                              \
  V(movzxb)                           \
  V(movzxw)                           \
  V(neg)                              \
  V(not)                              \
  V(or)                               \
  V(repmovs)                          \
  V(sbb)                              \
  V(sub)                              \
  V(test)                             \
  V(xchg)                             \
  V(xor)
241 :
242 : // Shift instructions on operands/registers with kInt32Size and kInt64Size.
// Each entry pairs a mnemonic with the subcode passed to shift() that
// selects the operation. NOTE(review): 0x6 is skipped — presumably an
// unused/alias encoding in the shift group; confirm against the x64 opcode
// tables before adding entries.
#define SHIFT_INSTRUCTION_LIST(V) \
  V(rol, 0x0)                     \
  V(ror, 0x1)                     \
  V(rcl, 0x2)                     \
  V(rcr, 0x3)                     \
  V(shl, 0x4)                     \
  V(shr, 0x5)                     \
  V(sar, 0x7)
251 :
252 : // Partial Constant Pool
253 : // Different from complete constant pool (like arm does), partial constant pool
// only takes effect for shareable constants in order to reduce code size.
255 : // Partial constant pool does not emit constant pool entries at the end of each
256 : // code object. Instead, it keeps the first shareable constant inlined in the
// instructions and uses rip-relative memory loads for the same constants in
// subsequent instructions. These rip-relative memory loads will target
// the position of the first inlined constant. For example:
260 : //
261 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
262 : // …
263 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
264 : // …
265 : //
266 : // turns into
267 : //
268 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
269 : // …
270 : // REX.W movq r10,[rip+0xffffff96] ; 7 bytes
271 : // …
272 :
273 41346124 : class ConstPool {
274 : public:
275 41349751 : explicit ConstPool(Assembler* assm) : assm_(assm) {}
276 : // Returns true when partial constant pool is valid for this entry.
277 : bool TryRecordEntry(intptr_t data, RelocInfo::Mode mode);
278 : bool IsEmpty() const { return entries_.empty(); }
279 :
280 : void PatchEntries();
281 : // Discard any pending pool entries.
282 : void Clear();
283 :
284 : private:
285 : // Adds a shared entry to entries_. Returns true if this is not the first time
286 : // we add this entry, false otherwise.
287 : bool AddSharedEntry(uint64_t data, int offset);
288 :
289 : // Check if the instruction is a rip-relative move.
290 : bool IsMoveRipRelative(Address instr);
291 :
292 : Assembler* assm_;
293 :
294 : // Values, pc offsets of entries.
295 : typedef std::multimap<uint64_t, int> EntryMap;
296 : EntryMap entries_;
297 :
298 : // Number of bytes taken up by the displacement of rip-relative addressing.
299 : static constexpr int kRipRelativeDispSize = 4; // 32-bit displacement.
300 : // Distance between the address of the displacement in the rip-relative move
301 : // instruction and the head address of the instruction.
302 : static constexpr int kMoveRipRelativeDispOffset =
303 : 3; // REX Opcode ModRM Displacement
304 : // Distance between the address of the imm64 in the 'movq reg, imm64'
305 : // instruction and the head address of the instruction.
306 : static constexpr int kMoveImm64Offset = 2; // REX Opcode imm64
307 : // A mask for rip-relative move instruction.
308 : static constexpr uint32_t kMoveRipRelativeMask = 0x00C7FFFB;
309 : // The bits for a rip-relative move instruction after mask.
310 : static constexpr uint32_t kMoveRipRelativeInstr = 0x00058B48;
311 : };
312 :
313 : class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
314 : private:
315 : // We check before assembling an instruction that there is sufficient
316 : // space to write an instruction and its relocation information.
317 : // The relocation writer's position must be kGap bytes above the end of
318 : // the generated instructions. This leaves enough space for the
319 : // longest possible x64 instruction, 15 bytes, and the longest possible
320 : // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
321 : // (There is a 15 byte limit on x64 instruction length that rules out some
322 : // otherwise valid instructions.)
323 : // This allows for a single, fast space check per instruction.
324 : static constexpr int kGap = 32;
325 :
326 : public:
327 : // Create an assembler. Instructions and relocation information are emitted
328 : // into a buffer, with the instructions starting from the beginning and the
329 : // relocation information starting from the end of the buffer. See CodeDesc
330 : // for a detailed comment on the layout (globals.h).
331 : //
332 : // If the provided buffer is nullptr, the assembler allocates and grows its
333 : // own buffer. Otherwise it takes ownership of the provided buffer.
334 : explicit Assembler(const AssemblerOptions&,
335 : std::unique_ptr<AssemblerBuffer> = {});
336 82695987 : ~Assembler() override = default;
337 :
338 : // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
339 : static constexpr int kNoHandlerTable = 0;
340 : static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
341 : void GetCode(Isolate* isolate, CodeDesc* desc,
342 : SafepointTableBuilder* safepoint_table_builder,
343 : int handler_table_offset);
344 :
345 : // Convenience wrapper for code without safepoint or handler tables.
346 : void GetCode(Isolate* isolate, CodeDesc* desc) {
347 314282 : GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
348 : }
349 :
350 : void FinalizeJumpOptimizationInfo();
351 :
352 : // Unused on this architecture.
353 : void MaybeEmitOutOfLineConstantPool() {}
354 :
355 : // Read/Modify the code target in the relative branch/call instruction at pc.
356 : // On the x64 architecture, we use relative jumps with a 32-bit displacement
357 : // to jump to other Code objects in the Code space in the heap.
358 : // Jumps to C functions are done indirectly through a 64-bit register holding
359 : // the absolute address of the target.
360 : // These functions convert between absolute Addresses of Code objects and
361 : // the relative displacements stored in the code.
362 : // The isolate argument is unused (and may be nullptr) when skipping flushing.
363 : static inline Address target_address_at(Address pc, Address constant_pool);
364 : static inline void set_target_address_at(
365 : Address pc, Address constant_pool, Address target,
366 : ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
367 :
368 : // Return the code target address at a call site from the return address
369 : // of that call in the instruction stream.
370 : static inline Address target_address_from_return_address(Address pc);
371 :
372 : // This sets the branch destination (which is in the instruction on x64).
373 : // This is for calls and branches within generated code.
374 : inline static void deserialization_set_special_target_at(
375 : Address instruction_payload, Code code, Address target);
376 :
377 : // Get the size of the special target encoded at 'instruction_payload'.
378 : inline static int deserialization_special_target_size(
379 : Address instruction_payload);
380 :
381 : // This sets the internal reference at the pc.
382 : inline static void deserialization_set_target_internal_reference_at(
383 : Address pc, Address target,
384 : RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
385 :
386 : inline Handle<Code> code_target_object_handle_at(Address pc);
387 : inline Address runtime_entry_at(Address pc);
388 :
389 : // Number of bytes taken up by the branch target in the code.
390 : static constexpr int kSpecialTargetSize = 4; // 32-bit displacement.
391 : // Distance between the address of the code target in the call instruction
392 : // and the return address pushed on the stack.
393 : static constexpr int kCallTargetAddressOffset = 4; // 32-bit displacement.
394 :
395 : // One byte opcode for test eax,0xXXXXXXXX.
396 : static constexpr byte kTestEaxByte = 0xA9;
397 : // One byte opcode for test al, 0xXX.
398 : static constexpr byte kTestAlByte = 0xA8;
399 : // One byte opcode for nop.
400 : static constexpr byte kNopByte = 0x90;
401 :
402 : // One byte prefix for a short conditional jump.
403 : static constexpr byte kJccShortPrefix = 0x70;
404 : static constexpr byte kJncShortOpcode = kJccShortPrefix | not_carry;
405 : static constexpr byte kJcShortOpcode = kJccShortPrefix | carry;
406 : static constexpr byte kJnzShortOpcode = kJccShortPrefix | not_zero;
407 : static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;
408 :
409 : // VEX prefix encodings.
410 : enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
411 : enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
412 : enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
413 : enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
414 :
415 : // ---------------------------------------------------------------------------
416 : // Code generation
417 : //
418 : // Function names correspond one-to-one to x64 instruction mnemonics.
419 : // Unless specified otherwise, instructions operate on 64-bit operands.
420 : //
421 : // If we need versions of an assembly instruction that operate on different
422 : // width arguments, we add a single-letter suffix specifying the width.
423 : // This is done for the following instructions: mov, cmp, inc, dec,
424 : // add, sub, and test.
425 : // There are no versions of these instructions without the suffix.
426 : // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
427 : // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
428 : // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
429 : // - Instructions on 64-bit (quadword) operands/registers use 'q'.
430 : // - Instructions on operands/registers with pointer size use 'p'.
431 :
432 : #define DECLARE_INSTRUCTION(instruction) \
433 : template <class P1> \
434 : void instruction##_tagged(P1 p1) { \
435 : emit_##instruction(p1, kTaggedSize); \
436 : } \
437 : \
438 : template <class P1> \
439 : void instruction##l(P1 p1) { \
440 : emit_##instruction(p1, kInt32Size); \
441 : } \
442 : \
443 : template <class P1> \
444 : void instruction##q(P1 p1) { \
445 : emit_##instruction(p1, kInt64Size); \
446 : } \
447 : \
448 : template <class P1, class P2> \
449 : void instruction##_tagged(P1 p1, P2 p2) { \
450 : emit_##instruction(p1, p2, kTaggedSize); \
451 : } \
452 : \
453 : template <class P1, class P2> \
454 : void instruction##l(P1 p1, P2 p2) { \
455 : emit_##instruction(p1, p2, kInt32Size); \
456 : } \
457 : \
458 : template <class P1, class P2> \
459 : void instruction##q(P1 p1, P2 p2) { \
460 : emit_##instruction(p1, p2, kInt64Size); \
461 : } \
462 : \
463 : template <class P1, class P2, class P3> \
464 : void instruction##l(P1 p1, P2 p2, P3 p3) { \
465 : emit_##instruction(p1, p2, p3, kInt32Size); \
466 : } \
467 : \
468 : template <class P1, class P2, class P3> \
469 : void instruction##q(P1 p1, P2 p2, P3 p3) { \
470 : emit_##instruction(p1, p2, p3, kInt64Size); \
471 : }
472 131116807 : ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
473 : #undef DECLARE_INSTRUCTION
474 :
475 : // Insert the smallest number of nop instructions
476 : // possible to align the pc offset to a multiple
477 : // of m, where m must be a power of 2.
478 : void Align(int m);
479 : // Insert the smallest number of zero bytes possible to align the pc offset
// to a multiple of m. m must be a power of 2 (>= 2).
481 : void DataAlign(int m);
482 : void Nop(int bytes = 1);
483 : // Aligns code to something that's optimal for a jump target for the platform.
484 : void CodeTargetAlign();
485 :
486 : // Stack
487 : void pushfq();
488 : void popfq();
489 :
490 : void pushq(Immediate value);
491 : // Push a 32 bit integer, and guarantee that it is actually pushed as a
492 : // 32 bit value, the normal push will optimize the 8 bit case.
493 : void pushq_imm32(int32_t imm32);
494 : void pushq(Register src);
495 : void pushq(Operand src);
496 :
497 : void popq(Register dst);
498 : void popq(Operand dst);
499 :
500 : void enter(Immediate size);
501 : void leave();
502 :
503 : // Moves
504 : void movb(Register dst, Operand src);
505 : void movb(Register dst, Immediate imm);
506 : void movb(Operand dst, Register src);
507 : void movb(Operand dst, Immediate imm);
508 :
509 : // Move the low 16 bits of a 64-bit register value to a 16-bit
510 : // memory location.
511 : void movw(Register dst, Operand src);
512 : void movw(Operand dst, Register src);
513 : void movw(Operand dst, Immediate imm);
514 :
515 : // Move the offset of the label location relative to the current
516 : // position (after the move) to the destination.
517 : void movl(Operand dst, Label* src);
518 :
519 : // Load a heap number into a register.
520 : // The heap number will not be allocated and embedded into the code right
521 : // away. Instead, we emit the load of a dummy object. Later, when calling
522 : // Assembler::GetCode, the heap number will be allocated and the code will be
523 : // patched by replacing the dummy with the actual object. The RelocInfo for
524 : // the embedded object gets already recorded correctly when emitting the dummy
525 : // move.
526 : void movq_heap_number(Register dst, double value);
527 :
528 : void movq_string(Register dst, const StringConstantBase* str);
529 :
530 : // Loads a 64-bit immediate into a register.
531 : void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); }
532 : void movq(Register dst, uint64_t value) {
533 143634 : movq(dst, Immediate64(static_cast<int64_t>(value)));
534 : }
535 :
536 : void movsxbl(Register dst, Register src);
537 : void movsxbl(Register dst, Operand src);
538 : void movsxbq(Register dst, Register src);
539 : void movsxbq(Register dst, Operand src);
540 : void movsxwl(Register dst, Register src);
541 : void movsxwl(Register dst, Operand src);
542 : void movsxwq(Register dst, Register src);
543 : void movsxwq(Register dst, Operand src);
544 : void movsxlq(Register dst, Register src);
545 : void movsxlq(Register dst, Operand src);
546 :
547 : // Repeated moves.
548 :
549 : void repmovsb();
550 : void repmovsw();
551 : void repmovsl() { emit_repmovs(kInt32Size); }
552 : void repmovsq() { emit_repmovs(kInt64Size); }
553 :
554 : // Instruction to load from an immediate 64-bit pointer into RAX.
555 : void load_rax(Address value, RelocInfo::Mode rmode);
556 : void load_rax(ExternalReference ext);
557 :
558 : // Conditional moves.
559 : void cmovq(Condition cc, Register dst, Register src);
560 : void cmovq(Condition cc, Register dst, Operand src);
561 : void cmovl(Condition cc, Register dst, Register src);
562 : void cmovl(Condition cc, Register dst, Operand src);
563 :
564 : void cmpb(Register dst, Immediate src) {
565 7070 : immediate_arithmetic_op_8(0x7, dst, src);
566 : }
567 :
568 : void cmpb_al(Immediate src);
569 :
570 : void cmpb(Register dst, Register src) {
571 3358 : arithmetic_op_8(0x3A, dst, src);
572 : }
573 :
574 453 : void cmpb(Register dst, Operand src) { arithmetic_op_8(0x3A, dst, src); }
575 :
576 460 : void cmpb(Operand dst, Register src) { arithmetic_op_8(0x38, src, dst); }
577 :
578 : void cmpb(Operand dst, Immediate src) {
579 15210 : immediate_arithmetic_op_8(0x7, dst, src);
580 : }
581 :
582 : void cmpw(Operand dst, Immediate src) {
583 224357 : immediate_arithmetic_op_16(0x7, dst, src);
584 : }
585 :
586 : void cmpw(Register dst, Immediate src) {
587 155163 : immediate_arithmetic_op_16(0x7, dst, src);
588 : }
589 :
590 60 : void cmpw(Register dst, Operand src) { arithmetic_op_16(0x3B, dst, src); }
591 :
592 : void cmpw(Register dst, Register src) {
593 448 : arithmetic_op_16(0x3B, dst, src);
594 : }
595 :
596 455 : void cmpw(Operand dst, Register src) { arithmetic_op_16(0x39, src, dst); }
597 :
598 0 : void testb(Register reg, Operand op) { testb(op, reg); }
599 :
600 0 : void testw(Register reg, Operand op) { testw(op, reg); }
601 :
602 : void andb(Register dst, Immediate src) {
603 : immediate_arithmetic_op_8(0x4, dst, src);
604 : }
605 :
606 : void decb(Register dst);
607 : void decb(Operand dst);
608 :
609 : // Lock prefix.
610 : void lock();
611 :
612 : void xchgb(Register reg, Operand op);
613 : void xchgw(Register reg, Operand op);
614 :
615 : void cmpxchgb(Operand dst, Register src);
616 : void cmpxchgw(Operand dst, Register src);
617 :
618 : // Sign-extends rax into rdx:rax.
619 : void cqo();
620 : // Sign-extends eax into edx:eax.
621 : void cdq();
622 :
623 : // Multiply eax by src, put the result in edx:eax.
624 : void mull(Register src);
625 : void mull(Operand src);
626 : // Multiply rax by src, put the result in rdx:rax.
627 : void mulq(Register src);
628 :
629 : #define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
630 : void instruction##l(Register dst, Immediate imm8) { \
631 : shift(dst, imm8, subcode, kInt32Size); \
632 : } \
633 : \
634 : void instruction##q(Register dst, Immediate imm8) { \
635 : shift(dst, imm8, subcode, kInt64Size); \
636 : } \
637 : \
638 : void instruction##l(Operand dst, Immediate imm8) { \
639 : shift(dst, imm8, subcode, kInt32Size); \
640 : } \
641 : \
642 : void instruction##q(Operand dst, Immediate imm8) { \
643 : shift(dst, imm8, subcode, kInt64Size); \
644 : } \
645 : \
646 : void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
647 : \
648 : void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
649 : \
650 : void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
651 : \
652 : void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
653 1309416 : SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
654 : #undef DECLARE_SHIFT_INSTRUCTION
655 :
656 : // Shifts dst:src left by cl bits, affecting only dst.
657 : void shld(Register dst, Register src);
658 :
659 : // Shifts src:dst right by cl bits, affecting only dst.
660 : void shrd(Register dst, Register src);
661 :
662 : void store_rax(Address dst, RelocInfo::Mode mode);
663 : void store_rax(ExternalReference ref);
664 :
665 : void subb(Register dst, Immediate src) {
666 2334 : immediate_arithmetic_op_8(0x5, dst, src);
667 : }
668 :
669 : void sub_sp_32(uint32_t imm);
670 :
671 : void testb(Register dst, Register src);
672 : void testb(Register reg, Immediate mask);
673 : void testb(Operand op, Immediate mask);
674 : void testb(Operand op, Register reg);
675 :
676 : void testw(Register dst, Register src);
677 : void testw(Register reg, Immediate mask);
678 : void testw(Operand op, Immediate mask);
679 : void testw(Operand op, Register reg);
680 :
681 : // Bit operations.
682 : void bswapl(Register dst);
683 : void bswapq(Register dst);
684 : void btq(Operand dst, Register src);
685 : void btsq(Operand dst, Register src);
686 : void btsq(Register dst, Immediate imm8);
687 : void btrq(Register dst, Immediate imm8);
688 : void bsrq(Register dst, Register src);
689 : void bsrq(Register dst, Operand src);
690 : void bsrl(Register dst, Register src);
691 : void bsrl(Register dst, Operand src);
692 : void bsfq(Register dst, Register src);
693 : void bsfq(Register dst, Operand src);
694 : void bsfl(Register dst, Register src);
695 : void bsfl(Register dst, Operand src);
696 :
697 : // Miscellaneous
698 : void clc();
699 : void cld();
700 : void cpuid();
701 : void hlt();
702 : void int3();
703 : void nop();
704 : void ret(int imm16);
705 : void ud2();
706 : void setcc(Condition cc, Register reg);
707 :
708 : void pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
709 : void pshufw(XMMRegister dst, Operand src, uint8_t shuffle);
710 : void pblendw(XMMRegister dst, Operand src, uint8_t mask);
711 : void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask);
712 : void palignr(XMMRegister dst, Operand src, uint8_t mask);
713 : void palignr(XMMRegister dst, XMMRegister src, uint8_t mask);
714 :
715 : // Label operations & relative jumps (PPUM Appendix D)
716 : //
717 : // Takes a branch opcode (cc) and a label (L) and generates
718 : // either a backward branch or a forward branch and links it
719 : // to the label fixup chain. Usage:
720 : //
721 : // Label L; // unbound label
722 : // j(cc, &L); // forward branch to unbound label
723 : // bind(&L); // bind label to the current pc
724 : // j(cc, &L); // backward branch to bound label
725 : // bind(&L); // illegal: a label may be bound only once
726 : //
727 : // Note: The same Label can be used for forward and backward branches
728 : // but it may be bound only once.
729 :
730 : void bind(Label* L); // binds an unbound label L to the current code position
731 :
732 : // Calls
733 : // Call near relative 32-bit displacement, relative to next instruction.
734 : void call(Label* L);
735 : void call(Address entry, RelocInfo::Mode rmode);
736 :
737 : // Explicitly emit a near call / near jump. The displacement is relative to
738 : // the next instructions (which starts at {pc_offset() + kNearJmpInstrSize}).
739 : static constexpr int kNearJmpInstrSize = 5;
740 : void near_call(intptr_t disp, RelocInfo::Mode rmode);
741 : void near_jmp(intptr_t disp, RelocInfo::Mode rmode);
742 :
743 : void call(Handle<Code> target,
744 : RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
745 :
746 : // Calls directly to the given address using a relative offset.
747 : // Should only ever be used in Code objects for calls within the
748 : // same Code object. Should not be used when generating new code (use labels),
749 : // but only when patching existing code.
750 : void call(Address target);
751 :
752 : // Call near absolute indirect, address in register
753 : void call(Register adr);
754 :
755 : // Jumps
756 : // Jump short or near relative.
757 : // Use a 32-bit signed displacement.
758 : // Unconditional jump to L
759 : void jmp(Label* L, Label::Distance distance = Label::kFar);
760 : void jmp(Handle<Code> target, RelocInfo::Mode rmode);
761 :
762 : // Jump near absolute indirect (r64)
763 : void jmp(Register adr);
764 : void jmp(Operand src);
765 :
766 : // Conditional jumps
767 : void j(Condition cc,
768 : Label* L,
769 : Label::Distance distance = Label::kFar);
770 : void j(Condition cc, Address entry, RelocInfo::Mode rmode);
771 : void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
772 :
773 : // Floating-point operations
774 : void fld(int i);
775 :
776 : void fld1();
777 : void fldz();
778 : void fldpi();
779 : void fldln2();
780 :
781 : void fld_s(Operand adr);
782 : void fld_d(Operand adr);
783 :
784 : void fstp_s(Operand adr);
785 : void fstp_d(Operand adr);
786 : void fstp(int index);
787 :
788 : void fild_s(Operand adr);
789 : void fild_d(Operand adr);
790 :
791 : void fist_s(Operand adr);
792 :
793 : void fistp_s(Operand adr);
794 : void fistp_d(Operand adr);
795 :
796 : void fisttp_s(Operand adr);
797 : void fisttp_d(Operand adr);
798 :
799 : void fabs();
800 : void fchs();
801 :
802 : void fadd(int i);
803 : void fsub(int i);
804 : void fmul(int i);
805 : void fdiv(int i);
806 :
807 : void fisub_s(Operand adr);
808 :
809 : void faddp(int i = 1);
810 : void fsubp(int i = 1);
811 : void fsubrp(int i = 1);
812 : void fmulp(int i = 1);
813 : void fdivp(int i = 1);
814 : void fprem();
815 : void fprem1();
816 :
817 : void fxch(int i = 1);
818 : void fincstp();
819 : void ffree(int i = 0);
820 :
821 : void ftst();
822 : void fucomp(int i);
823 : void fucompp();
824 : void fucomi(int i);
825 : void fucomip();
826 :
827 : void fcompp();
828 : void fnstsw_ax();
829 : void fwait();
830 : void fnclex();
831 :
832 : void fsin();
833 : void fcos();
834 : void fptan();
835 : void fyl2x();
836 : void f2xm1();
837 : void fscale();
838 : void fninit();
839 :
840 : void frndint();
841 :
842 : void sahf();
843 :
844 : // SSE instructions
// Scalar single-precision arithmetic. Each instruction has a register-source
// and a memory-source (Operand) overload.
845 : void addss(XMMRegister dst, XMMRegister src);
846 : void addss(XMMRegister dst, Operand src);
847 : void subss(XMMRegister dst, XMMRegister src);
848 : void subss(XMMRegister dst, Operand src);
849 : void mulss(XMMRegister dst, XMMRegister src);
850 : void mulss(XMMRegister dst, Operand src);
851 : void divss(XMMRegister dst, XMMRegister src);
852 : void divss(XMMRegister dst, Operand src);
853 :
854 : void maxss(XMMRegister dst, XMMRegister src);
855 : void maxss(XMMRegister dst, Operand src);
856 : void minss(XMMRegister dst, XMMRegister src);
857 : void minss(XMMRegister dst, Operand src);
858 :
859 : void sqrtss(XMMRegister dst, XMMRegister src);
860 : void sqrtss(XMMRegister dst, Operand src);
861 :
862 : void ucomiss(XMMRegister dst, XMMRegister src);
863 : void ucomiss(XMMRegister dst, Operand src);
864 : void movaps(XMMRegister dst, XMMRegister src);
865 :
866 : // Don't use this unless it's important to keep the
867 : // top half of the destination register unchanged.
868 : // Use movaps when moving float values and movd for integer
869 : // values in xmm registers.
870 : void movss(XMMRegister dst, XMMRegister src);
871 :
872 : void movss(XMMRegister dst, Operand src);
873 : void movss(Operand dst, XMMRegister src);
874 : void shufps(XMMRegister dst, XMMRegister src, byte imm8);
875 :
876 : void cvttss2si(Register dst, Operand src);
877 : void cvttss2si(Register dst, XMMRegister src);
878 : void cvtlsi2ss(XMMRegister dst, Operand src);
879 : void cvtlsi2ss(XMMRegister dst, Register src);
880 :
// Packed-single bitwise / arithmetic operations.
881 : void andps(XMMRegister dst, XMMRegister src);
882 : void andps(XMMRegister dst, Operand src);
883 : void andnps(XMMRegister dst, XMMRegister src);
884 : void andnps(XMMRegister dst, Operand src);
885 : void orps(XMMRegister dst, XMMRegister src);
886 : void orps(XMMRegister dst, Operand src);
887 : void xorps(XMMRegister dst, XMMRegister src);
888 : void xorps(XMMRegister dst, Operand src);
889 :
890 : void addps(XMMRegister dst, XMMRegister src);
891 : void addps(XMMRegister dst, Operand src);
892 : void subps(XMMRegister dst, XMMRegister src);
893 : void subps(XMMRegister dst, Operand src);
894 : void mulps(XMMRegister dst, XMMRegister src);
895 : void mulps(XMMRegister dst, Operand src);
896 : void divps(XMMRegister dst, XMMRegister src);
897 : void divps(XMMRegister dst, Operand src);
898 :
899 : void movmskps(Register dst, XMMRegister src);
900 :
// Generic VEX-encoded instruction emitter, parameterized by opcode, SIMD
// prefix (pp), leading-opcode escape (m) and VEX.W bit. Used by the AVX
// wrapper methods and macros below.
901 : void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
902 : SIMDPrefix pp, LeadingOpcode m, VexW w);
903 : void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
904 : SIMDPrefix pp, LeadingOpcode m, VexW w);
905 :
906 : // SSE2 instructions
// Generic SSE2 emitter taking the raw prefix/escape/opcode bytes; the macro
// below stamps out a (register, register) and a (register, memory) overload
// for every entry in SSE2_INSTRUCTION_LIST.
907 : void sse2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape,
908 : byte opcode);
909 : void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
910 : byte opcode);
911 : #define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
912 : void instruction(XMMRegister dst, XMMRegister src) { \
913 : sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
914 : } \
915 : void instruction(XMMRegister dst, Operand src) { \
916 : sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
917 : }
918 :
919 4073 : SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
920 : #undef DECLARE_SSE2_INSTRUCTION
921 :
// AVX (VEX-encoded, three-operand) counterparts of the same SSE2 list,
// named with a "v" prefix and routed through vinstr with VEX.W = 0.
922 : #define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
923 : void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
924 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
925 : } \
926 : void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
927 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
928 : }
929 :
930 312564 : SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
931 : #undef DECLARE_SSE2_AVX_INSTRUCTION
932 :
933 : // SSE3
934 : void lddqu(XMMRegister dst, Operand src);
935 :
936 : // SSSE3
// SSSE3/SSE4 emitters take two escape bytes (e.g. 0F 38 / 0F 3A opcode maps)
// instead of the single escape SSE2 uses.
937 : void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
938 : byte escape2, byte opcode);
939 : void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
940 : byte escape2, byte opcode);
941 :
942 : #define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
943 : opcode) \
944 : void instruction(XMMRegister dst, XMMRegister src) { \
945 : ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
946 : } \
947 : void instruction(XMMRegister dst, Operand src) { \
948 : ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
949 : }
950 :
951 2822 : SSSE3_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
952 : #undef DECLARE_SSSE3_INSTRUCTION
953 :
954 : // SSE4
955 : void sse4_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
956 : byte escape2, byte opcode);
957 : void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
958 : byte escape2, byte opcode);
959 : #define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
960 : opcode) \
961 : void instruction(XMMRegister dst, XMMRegister src) { \
962 : sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
963 : } \
964 : void instruction(XMMRegister dst, Operand src) { \
965 : sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
966 : }
967 :
968 652 : SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
969 : #undef DECLARE_SSE4_INSTRUCTION
970 :
// AVX forms of the SSSE3 and SSE4 lists. The two escape tokens are pasted
// into one LeadingOpcode constant (e.g. k0F##38 -> k0F38).
971 : #define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
972 : opcode) \
973 : void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
974 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
975 : } \
976 : void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
977 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
978 : }
979 :
980 90 : SSSE3_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
981 150 : SSE4_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
982 : #undef DECLARE_SSE34_AVX_INSTRUCTION
983 :
// GPR <-> XMM moves (32-bit movd, 64-bit movq) and XMM <-> XMM/memory moves.
984 : void movd(XMMRegister dst, Register src);
985 : void movd(XMMRegister dst, Operand src);
986 : void movd(Register dst, XMMRegister src);
987 : void movq(XMMRegister dst, Register src);
988 : void movq(Register dst, XMMRegister src);
989 : void movq(XMMRegister dst, XMMRegister src);
990 :
991 : // Don't use this unless it's important to keep the
992 : // top half of the destination register unchanged.
993 : // Use movapd when moving double values and movq for integer
994 : // values in xmm registers.
995 : void movsd(XMMRegister dst, XMMRegister src);
996 :
997 : void movsd(Operand dst, XMMRegister src);
998 : void movsd(XMMRegister dst, Operand src);
999 :
1000 : void movdqa(Operand dst, XMMRegister src);
1001 : void movdqa(XMMRegister dst, Operand src);
1002 :
1003 : void movdqu(Operand dst, XMMRegister src);
1004 : void movdqu(XMMRegister dst, Operand src);
1005 :
1006 : void movapd(XMMRegister dst, XMMRegister src);
1007 : void movupd(XMMRegister dst, Operand src);
1008 : void movupd(Operand dst, XMMRegister src);
1009 :
// Packed integer shifts by immediate.
1010 : void psllq(XMMRegister reg, byte imm8);
1011 : void psrlq(XMMRegister reg, byte imm8);
1012 : void psllw(XMMRegister reg, byte imm8);
1013 : void pslld(XMMRegister reg, byte imm8);
1014 : void psrlw(XMMRegister reg, byte imm8);
1015 : void psrld(XMMRegister reg, byte imm8);
1016 : void psraw(XMMRegister reg, byte imm8);
1017 : void psrad(XMMRegister reg, byte imm8);
1018 :
// Conversions. The cvtt* forms truncate; the trailing "q" denotes a 64-bit
// integer operand, "l" a 32-bit one.
1019 : void cvttsd2si(Register dst, Operand src);
1020 : void cvttsd2si(Register dst, XMMRegister src);
1021 : void cvttss2siq(Register dst, XMMRegister src);
1022 : void cvttss2siq(Register dst, Operand src);
1023 : void cvttsd2siq(Register dst, XMMRegister src);
1024 : void cvttsd2siq(Register dst, Operand src);
1025 : void cvttps2dq(XMMRegister dst, Operand src);
1026 : void cvttps2dq(XMMRegister dst, XMMRegister src);
1027 :
1028 : void cvtlsi2sd(XMMRegister dst, Operand src);
1029 : void cvtlsi2sd(XMMRegister dst, Register src);
1030 :
1031 : void cvtqsi2ss(XMMRegister dst, Operand src);
1032 : void cvtqsi2ss(XMMRegister dst, Register src);
1033 :
1034 : void cvtqsi2sd(XMMRegister dst, Operand src);
1035 : void cvtqsi2sd(XMMRegister dst, Register src);
1036 :
1037 :
1038 : void cvtss2sd(XMMRegister dst, XMMRegister src);
1039 : void cvtss2sd(XMMRegister dst, Operand src);
1040 : void cvtsd2ss(XMMRegister dst, XMMRegister src);
1041 : void cvtsd2ss(XMMRegister dst, Operand src);
1042 :
1043 : void cvtsd2si(Register dst, XMMRegister src);
1044 : void cvtsd2siq(Register dst, XMMRegister src);
1045 :
// Scalar double-precision arithmetic.
1046 : void addsd(XMMRegister dst, XMMRegister src);
1047 : void addsd(XMMRegister dst, Operand src);
1048 : void subsd(XMMRegister dst, XMMRegister src);
1049 : void subsd(XMMRegister dst, Operand src);
1050 : void mulsd(XMMRegister dst, XMMRegister src);
1051 : void mulsd(XMMRegister dst, Operand src);
1052 : void divsd(XMMRegister dst, XMMRegister src);
1053 : void divsd(XMMRegister dst, Operand src);
1054 :
1055 : void maxsd(XMMRegister dst, XMMRegister src);
1056 : void maxsd(XMMRegister dst, Operand src);
1057 : void minsd(XMMRegister dst, XMMRegister src);
1058 : void minsd(XMMRegister dst, Operand src);
1059 :
1060 : void andpd(XMMRegister dst, XMMRegister src);
1061 : void andpd(XMMRegister dst, Operand src);
1062 : void orpd(XMMRegister dst, XMMRegister src);
1063 : void orpd(XMMRegister dst, Operand src);
1064 : void xorpd(XMMRegister dst, XMMRegister src);
1065 : void xorpd(XMMRegister dst, Operand src);
1066 : void sqrtsd(XMMRegister dst, XMMRegister src);
1067 : void sqrtsd(XMMRegister dst, Operand src);
1068 :
1069 : void haddps(XMMRegister dst, XMMRegister src);
1070 : void haddps(XMMRegister dst, Operand src);
1071 :
1072 : void ucomisd(XMMRegister dst, XMMRegister src);
1073 : void ucomisd(XMMRegister dst, Operand src);
1074 : void cmpltsd(XMMRegister dst, XMMRegister src);
1075 :
1076 : void movmskpd(Register dst, XMMRegister src);
1077 :
1078 : // SSE 4.1 instruction
// Lane extract/insert with an immediate lane index.
1079 : void insertps(XMMRegister dst, XMMRegister src, byte imm8);
1080 : void insertps(XMMRegister dst, Operand src, byte imm8);
1081 : void extractps(Register dst, XMMRegister src, byte imm8);
1082 : void pextrb(Register dst, XMMRegister src, int8_t imm8);
1083 : void pextrb(Operand dst, XMMRegister src, int8_t imm8);
1084 : void pextrw(Register dst, XMMRegister src, int8_t imm8);
1085 : void pextrw(Operand dst, XMMRegister src, int8_t imm8);
1086 : void pextrd(Register dst, XMMRegister src, int8_t imm8);
1087 : void pextrd(Operand dst, XMMRegister src, int8_t imm8);
1088 : void pinsrb(XMMRegister dst, Register src, int8_t imm8);
1089 : void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
1090 : void pinsrw(XMMRegister dst, Register src, int8_t imm8);
1091 : void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
1092 : void pinsrd(XMMRegister dst, Register src, int8_t imm8);
1093 : void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
1094 :
1095 : void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
1096 : void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
1097 :
// Packed compares; `cmp` is the predicate immediate defined by the ISA.
1098 : void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
1099 : void cmpps(XMMRegister dst, Operand src, int8_t cmp);
1100 : void cmppd(XMMRegister dst, XMMRegister src, int8_t cmp);
1101 : void cmppd(XMMRegister dst, Operand src, int8_t cmp);
1102 :
// Convenience wrappers that bake the predicate immediate into the mnemonic
// (eq=0, lt=1, le=2, neq=4, nlt=5, nle=6).
1103 : #define SSE_CMP_P(instr, imm8) \
1104 : void instr##ps(XMMRegister dst, XMMRegister src) { cmpps(dst, src, imm8); } \
1105 : void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \
1106 : void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
1107 : void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
1108 :
1109 24 : SSE_CMP_P(cmpeq, 0x0)
1110 28 : SSE_CMP_P(cmplt, 0x1)
1111 32 : SSE_CMP_P(cmple, 0x2)
1112 20 : SSE_CMP_P(cmpneq, 0x4)
1113 20 : SSE_CMP_P(cmpnlt, 0x5)
1114 20 : SSE_CMP_P(cmpnle, 0x6)
1115 :
1116 : #undef SSE_CMP_P
1117 :
// Remaining packed-single operations and shuffles.
1118 : void minps(XMMRegister dst, XMMRegister src);
1119 : void minps(XMMRegister dst, Operand src);
1120 : void maxps(XMMRegister dst, XMMRegister src);
1121 : void maxps(XMMRegister dst, Operand src);
1122 : void rcpps(XMMRegister dst, XMMRegister src);
1123 : void rcpps(XMMRegister dst, Operand src);
1124 : void rsqrtps(XMMRegister dst, XMMRegister src);
1125 : void rsqrtps(XMMRegister dst, Operand src);
1126 : void sqrtps(XMMRegister dst, XMMRegister src);
1127 : void sqrtps(XMMRegister dst, Operand src);
1128 : void movups(XMMRegister dst, XMMRegister src);
1129 : void movups(XMMRegister dst, Operand src);
1130 : void movups(Operand dst, XMMRegister src);
1131 : void psrldq(XMMRegister dst, uint8_t shift);
1132 : void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1133 : void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
1134 : void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1135 : void pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
1136 : void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1137 : void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
1138 : void cvtdq2ps(XMMRegister dst, XMMRegister src);
1139 : void cvtdq2ps(XMMRegister dst, Operand src);
1140 :
1141 : // AVX instruction
// FMA3 scalar-double wrappers. The 132/213/231 suffix selects the operand
// ordering of the fused multiply-add; it corresponds to the 0x9?/0xa?/0xb?
// opcode groups passed to vfmasd below.
1142 : void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1143 0 : vfmasd(0x99, dst, src1, src2);
1144 : }
1145 : void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1146 0 : vfmasd(0xa9, dst, src1, src2);
1147 : }
1148 : void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1149 0 : vfmasd(0xb9, dst, src1, src2);
1150 : }
1151 : void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1152 0 : vfmasd(0x99, dst, src1, src2);
1153 : }
1154 : void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1155 0 : vfmasd(0xa9, dst, src1, src2);
1156 : }
1157 : void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1158 0 : vfmasd(0xb9, dst, src1, src2);
1159 : }
1160 : void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1161 0 : vfmasd(0x9b, dst, src1, src2);
1162 : }
1163 : void vfmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1164 0 : vfmasd(0xab, dst, src1, src2);
1165 : }
1166 : void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1167 0 : vfmasd(0xbb, dst, src1, src2);
1168 : }
1169 : void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1170 0 : vfmasd(0x9b, dst, src1, src2);
1171 : }
1172 : void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1173 0 : vfmasd(0xab, dst, src1, src2);
1174 : }
1175 : void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1176 0 : vfmasd(0xbb, dst, src1, src2);
1177 : }
1178 : void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1179 0 : vfmasd(0x9d, dst, src1, src2);
1180 : }
1181 : void vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1182 0 : vfmasd(0xad, dst, src1, src2);
1183 : }
1184 : void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1185 0 : vfmasd(0xbd, dst, src1, src2);
1186 : }
1187 : void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1188 0 : vfmasd(0x9d, dst, src1, src2);
1189 : }
1190 : void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1191 0 : vfmasd(0xad, dst, src1, src2);
1192 : }
1193 : void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1194 0 : vfmasd(0xbd, dst, src1, src2);
1195 : }
1196 : void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1197 0 : vfmasd(0x9f, dst, src1, src2);
1198 : }
1199 : void vfnmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1200 0 : vfmasd(0xaf, dst, src1, src2);
1201 : }
1202 : void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1203 0 : vfmasd(0xbf, dst, src1, src2);
1204 : }
1205 : void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1206 0 : vfmasd(0x9f, dst, src1, src2);
1207 : }
1208 : void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1209 0 : vfmasd(0xaf, dst, src1, src2);
1210 : }
1211 : void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1212 0 : vfmasd(0xbf, dst, src1, src2);
1213 : }
// Shared emitter for the scalar-double FMA forms above.
1214 : void vfmasd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1215 : void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1216 :
// FMA3 scalar-single wrappers; same opcode scheme as the scalar-double
// group above, routed through vfmass instead of vfmasd.
1217 : void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1218 0 : vfmass(0x99, dst, src1, src2);
1219 : }
1220 : void vfmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1221 0 : vfmass(0xa9, dst, src1, src2);
1222 : }
1223 : void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1224 0 : vfmass(0xb9, dst, src1, src2);
1225 : }
1226 : void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1227 0 : vfmass(0x99, dst, src1, src2);
1228 : }
1229 : void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1230 0 : vfmass(0xa9, dst, src1, src2);
1231 : }
1232 : void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1233 0 : vfmass(0xb9, dst, src1, src2);
1234 : }
1235 : void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1236 0 : vfmass(0x9b, dst, src1, src2);
1237 : }
1238 : void vfmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1239 0 : vfmass(0xab, dst, src1, src2);
1240 : }
1241 : void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1242 0 : vfmass(0xbb, dst, src1, src2);
1243 : }
1244 : void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1245 0 : vfmass(0x9b, dst, src1, src2);
1246 : }
1247 : void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1248 0 : vfmass(0xab, dst, src1, src2);
1249 : }
1250 : void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1251 0 : vfmass(0xbb, dst, src1, src2);
1252 : }
1253 : void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1254 0 : vfmass(0x9d, dst, src1, src2);
1255 : }
1256 : void vfnmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1257 0 : vfmass(0xad, dst, src1, src2);
1258 : }
1259 : void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1260 0 : vfmass(0xbd, dst, src1, src2);
1261 : }
1262 : void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1263 0 : vfmass(0x9d, dst, src1, src2);
1264 : }
1265 : void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1266 0 : vfmass(0xad, dst, src1, src2);
1267 : }
1268 : void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1269 0 : vfmass(0xbd, dst, src1, src2);
1270 : }
1271 : void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1272 0 : vfmass(0x9f, dst, src1, src2);
1273 : }
1274 : void vfnmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1275 0 : vfmass(0xaf, dst, src1, src2);
1276 : }
1277 : void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1278 0 : vfmass(0xbf, dst, src1, src2);
1279 : }
1280 : void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1281 0 : vfmass(0x9f, dst, src1, src2);
1282 : }
1283 : void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1284 0 : vfmass(0xaf, dst, src1, src2);
1285 : }
1286 : void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1287 0 : vfmass(0xbf, dst, src1, src2);
1288 : }
// Shared emitter for the scalar-single FMA forms above.
1289 : void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1290 : void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1291 :
// VEX-encoded GPR <-> XMM moves and scalar-double moves. In the two-operand
// vmovsd forms xmm0 fills the unused VEX.vvvv source slot (0x10 = load,
// 0x11 = store opcode).
1292 : void vmovd(XMMRegister dst, Register src);
1293 : void vmovd(XMMRegister dst, Operand src);
1294 : void vmovd(Register dst, XMMRegister src);
1295 : void vmovq(XMMRegister dst, Register src);
1296 : void vmovq(XMMRegister dst, Operand src);
1297 : void vmovq(Register dst, XMMRegister src);
1298 :
1299 42489 : void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1300 : vsd(0x10, dst, src1, src2);
1301 42491 : }
1302 3279205 : void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
1303 2629883 : void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
1304 :
// Macro family that stamps out three-operand AVX arithmetic wrappers:
// AVX_SP_3 emits all four scalar/packed single/double variants, AVX_S_3 the
// scalar pair, AVX_P_3 the packed pair. `impl` is the encoding helper
// (vss/vsd/vps/vpd) that supplies the correct SIMD prefix.
1305 : #define AVX_SP_3(instr, opcode) \
1306 : AVX_S_3(instr, opcode) \
1307 : AVX_P_3(instr, opcode)
1308 :
1309 : #define AVX_S_3(instr, opcode) \
1310 : AVX_3(instr##ss, opcode, vss) \
1311 : AVX_3(instr##sd, opcode, vsd)
1312 :
1313 : #define AVX_P_3(instr, opcode) \
1314 : AVX_3(instr##ps, opcode, vps) \
1315 : AVX_3(instr##pd, opcode, vpd)
1316 :
1317 : #define AVX_3(instr, opcode, impl) \
1318 : void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1319 : impl(opcode, dst, src1, src2); \
1320 : } \
1321 : void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
1322 : impl(opcode, dst, src1, src2); \
1323 : }
1324 :
1325 1042 : AVX_SP_3(vsqrt, 0x51)
1326 161164 : AVX_SP_3(vadd, 0x58)
1327 46988 : AVX_SP_3(vsub, 0x5c)
1328 25341 : AVX_SP_3(vmul, 0x59)
1329 26205 : AVX_SP_3(vdiv, 0x5e)
1330 42 : AVX_SP_3(vmin, 0x5d)
1331 42 : AVX_SP_3(vmax, 0x5f)
1332 765 : AVX_P_3(vand, 0x54)
1333 10 : AVX_P_3(vandn, 0x55)
1334 14 : AVX_P_3(vor, 0x56)
1335 534400 : AVX_P_3(vxor, 0x57)
1336 36050 : AVX_3(vcvtsd2ss, 0x5a, vsd)
1337 20 : AVX_3(vhaddps, 0x7c, vsd)
1338 :
1339 : #undef AVX_3
1340 : #undef AVX_S_3
1341 : #undef AVX_P_3
1342 : #undef AVX_SP_3
1343 :
1344 : void vpsrlq(XMMRegister dst, XMMRegister src, byte imm8) {
1345 200206 : vpd(0x73, xmm2, dst, src);
1346 : emit(imm8);
1347 : }
1348 : void vpsllq(XMMRegister dst, XMMRegister src, byte imm8) {
1349 233827 : vpd(0x73, xmm6, dst, src);
1350 : emit(imm8);
1351 : }
1352 : void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1353 9147 : vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1354 : }
1355 : void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1356 11358 : vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1357 : }
1358 : void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
1359 373410 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1360 373410 : vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
1361 : }
1362 : void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1363 3865 : vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
1364 : }
1365 : void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
1366 1081 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1367 1081 : vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
1368 : }
1369 : void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1370 8 : vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
1371 : }
1372 : void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
1373 291 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1374 291 : vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
1375 : }
1376 : void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1377 0 : vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
1378 : }
1379 : void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
1380 20224 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1381 20224 : vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
1382 : }
1383 : void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1384 1966 : vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
1385 : }
1386 : void vcvttss2si(Register dst, XMMRegister src) {
1387 435 : XMMRegister idst = XMMRegister::from_code(dst.code());
1388 435 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1389 : }
1390 : void vcvttss2si(Register dst, Operand src) {
1391 0 : XMMRegister idst = XMMRegister::from_code(dst.code());
1392 0 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1393 : }
1394 : void vcvttsd2si(Register dst, XMMRegister src) {
1395 107455 : XMMRegister idst = XMMRegister::from_code(dst.code());
1396 107455 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1397 : }
1398 : void vcvttsd2si(Register dst, Operand src) {
1399 20283 : XMMRegister idst = XMMRegister::from_code(dst.code());
1400 20283 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1401 : }
1402 : void vcvttss2siq(Register dst, XMMRegister src) {
1403 278 : XMMRegister idst = XMMRegister::from_code(dst.code());
1404 278 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1405 : }
1406 : void vcvttss2siq(Register dst, Operand src) {
1407 0 : XMMRegister idst = XMMRegister::from_code(dst.code());
1408 0 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1409 : }
1410 : void vcvttsd2siq(Register dst, XMMRegister src) {
1411 61928 : XMMRegister idst = XMMRegister::from_code(dst.code());
1412 61928 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1413 : }
1414 : void vcvttsd2siq(Register dst, Operand src) {
1415 10 : XMMRegister idst = XMMRegister::from_code(dst.code());
1416 10 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1417 : }
1418 : void vcvtsd2si(Register dst, XMMRegister src) {
1419 9 : XMMRegister idst = XMMRegister::from_code(dst.code());
1420 9 : vinstr(0x2d, idst, xmm0, src, kF2, k0F, kW0);
1421 : }
1422 : void vucomisd(XMMRegister dst, XMMRegister src) {
1423 243354 : vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
1424 : }
1425 : void vucomisd(XMMRegister dst, Operand src) {
1426 20661 : vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
1427 : }
1428 : void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
1429 : RoundingMode mode) {
1430 479 : vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
1431 479 : emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
1432 : }
1433 : void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
1434 : RoundingMode mode) {
1435 45055 : vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
1436 45045 : emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
1437 : }
1438 :
1439 : void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1440 173714 : vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
1441 : }
1442 : void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
1443 2968452 : vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
1444 : }
1445 :
1446 : void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1447 36686 : vss(0x10, dst, src1, src2);
1448 : }
1449 14617 : void vmovss(XMMRegister dst, Operand src) { vss(0x10, dst, xmm0, src); }
1450 685099 : void vmovss(Operand dst, XMMRegister src) { vss(0x11, src, xmm0, dst); }
1451 : void vucomiss(XMMRegister dst, XMMRegister src);
1452 : void vucomiss(XMMRegister dst, Operand src);
1453 : void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1454 : void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1455 :
// VEX-encoded packed moves. xmm0 fills the unused VEX.vvvv slot in the
// two-operand forms; 0x10/0x28 are load opcodes, 0x11 the store opcode.
1456 : void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
1457 1797 : void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
1458 6415 : void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
1459 8717 : void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
1460 160697 : void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
1461 5 : void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
1462 5 : void vmovupd(Operand dst, XMMRegister src) { vpd(0x11, src, xmm0, dst); }
1463 : void vmovmskps(Register dst, XMMRegister src) {
1464 166 : XMMRegister idst = XMMRegister::from_code(dst.code());
1465 166 : vps(0x50, idst, xmm0, src);
1466 : }
1467 : void vmovmskpd(Register dst, XMMRegister src) {
1468 634 : XMMRegister idst = XMMRegister::from_code(dst.code());
1469 634 : vpd(0x50, idst, xmm0, src);
1470 : }
// Packed compares; `cmp` is the predicate immediate appended after the
// instruction (see the AVX_CMP_P wrappers below for named predicates).
1471 : void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
1472 35 : vps(0xC2, dst, src1, src2);
1473 : emit(cmp);
1474 : }
1475 : void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
1476 35 : vps(0xC2, dst, src1, src2);
1477 : emit(cmp);
1478 : }
1479 : void vcmppd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
1480 35 : vpd(0xC2, dst, src1, src2);
1481 : emit(cmp);
1482 : }
1483 : void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
1484 35 : vpd(0xC2, dst, src1, src2);
1485 : emit(cmp);
1486 : }
1487 :
1488 : #define AVX_CMP_P(instr, imm8) \
1489 : void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1490 : vcmpps(dst, src1, src2, imm8); \
1491 : } \
1492 : void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
1493 : vcmpps(dst, src1, src2, imm8); \
1494 : } \
1495 : void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1496 : vcmppd(dst, src1, src2, imm8); \
1497 : } \
1498 : void instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
1499 : vcmppd(dst, src1, src2, imm8); \
1500 : }
1501 :
1502 40 : AVX_CMP_P(vcmpeq, 0x0)
1503 40 : AVX_CMP_P(vcmplt, 0x1)
1504 40 : AVX_CMP_P(vcmple, 0x2)
1505 40 : AVX_CMP_P(vcmpneq, 0x4)
1506 40 : AVX_CMP_P(vcmpnlt, 0x5)
1507 40 : AVX_CMP_P(vcmpnle, 0x6)
1508 :
1509 : #undef AVX_CMP_P
1510 :
1511 : void vlddqu(XMMRegister dst, Operand src) {
1512 5 : vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
1513 : }
// Immediate shifts: as with vpsrlq/vpsllq above, the fixed xmm2/xmm4/xmm6
// arguments encode the /2, /4 and /6 opcode extensions (srl/sra/sll).
1514 : void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1515 5 : vinstr(0x71, xmm6, dst, src, k66, k0F, kWIG);
1516 : emit(imm8);
1517 : }
1518 : void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1519 5 : vinstr(0x71, xmm2, dst, src, k66, k0F, kWIG);
1520 : emit(imm8);
1521 : }
1522 : void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1523 5 : vinstr(0x71, xmm4, dst, src, k66, k0F, kWIG);
1524 : emit(imm8);
1525 : }
1526 : void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1527 64000 : vinstr(0x72, xmm6, dst, src, k66, k0F, kWIG);
1528 : emit(imm8);
1529 : }
1530 : void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1531 48177 : vinstr(0x72, xmm2, dst, src, k66, k0F, kWIG);
1532 : emit(imm8);
1533 : }
1534 : void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1535 5 : vinstr(0x72, xmm4, dst, src, k66, k0F, kWIG);
1536 : emit(imm8);
1537 : }
// Lane extract/insert; GPR operands are re-coded as XMMRegister to satisfy
// vinstr, which only uses the register number.
1538 5 : void vpextrb(Register dst, XMMRegister src, uint8_t imm8) {
1539 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1540 5 : vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
1541 : emit(imm8);
1542 5 : }
1543 : void vpextrb(Operand dst, XMMRegister src, uint8_t imm8) {
1544 5 : vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
1545 : emit(imm8);
1546 : }
1547 5 : void vpextrw(Register dst, XMMRegister src, uint8_t imm8) {
1548 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1549 5 : vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
1550 : emit(imm8);
1551 5 : }
1552 : void vpextrw(Operand dst, XMMRegister src, uint8_t imm8) {
1553 5 : vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
1554 : emit(imm8);
1555 : }
1556 5 : void vpextrd(Register dst, XMMRegister src, uint8_t imm8) {
1557 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1558 5 : vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
1559 : emit(imm8);
1560 5 : }
1561 : void vpextrd(Operand dst, XMMRegister src, uint8_t imm8) {
1562 5 : vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
1563 : emit(imm8);
1564 : }
1565 : void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1566 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1567 5 : vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
1568 : emit(imm8);
1569 : }
1570 : void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1571 5 : vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
1572 : emit(imm8);
1573 : }
1574 : void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1575 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1576 5 : vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
1577 : emit(imm8);
1578 : }
1579 : void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1580 5 : vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
1581 : emit(imm8);
1582 : }
1583 : void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1584 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1585 5 : vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
1586 : emit(imm8);
1587 : }
1588 : void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1589 5 : vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
1590 : emit(imm8);
1591 : }
1592 : void vpshufd(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1593 5 : vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
1594 : emit(imm8);
1595 : }
1596 :
// Packed-single (no prefix) and packed-double (66 prefix) encoding helpers;
// defined out of line.
1597 : void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1598 : void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1599 : void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1600 : void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1601 :
1602 : // BMI instruction
  // ANDN: dst = ~src1 & src2 (VEX opcode 0xf2). src1 is encoded in VEX.vvvv.
  // The q/l suffix selects 64-bit vs 32-bit operand size (VEX.W).
  void andnq(Register dst, Register src1, Register src2) {
    bmi1q(0xf2, dst, src1, src2);
  }
  void andnq(Register dst, Register src1, Operand src2) {
    bmi1q(0xf2, dst, src1, src2);
  }
  void andnl(Register dst, Register src1, Register src2) {
    bmi1l(0xf2, dst, src1, src2);
  }
  void andnl(Register dst, Register src1, Operand src2) {
    bmi1l(0xf2, dst, src1, src2);
  }
  // BEXTR: extract from src1 the bit field whose start index and length are
  // packed in src2 (VEX opcode 0xf7). src2 is the control operand and is
  // encoded in VEX.vvvv while src1 goes in r/m — hence the swapped argument
  // order in the bmi1 call.
  void bextrq(Register dst, Register src1, Register src2) {
    bmi1q(0xf7, dst, src2, src1);
  }
  void bextrq(Register dst, Operand src1, Register src2) {
    bmi1q(0xf7, dst, src2, src1);
  }
  void bextrl(Register dst, Register src1, Register src2) {
    bmi1l(0xf7, dst, src2, src1);
  }
  void bextrl(Register dst, Operand src1, Register src2) {
    bmi1l(0xf7, dst, src2, src1);
  }
  // BLSI: dst = src & -src (isolate lowest set bit). The VEX group opcode
  // 0xf3 uses ModRM.reg as an opcode extension; /3 is encoded by passing rbx
  // (register code 3). The destination travels in VEX.vvvv.
  void blsiq(Register dst, Register src) { bmi1q(0xf3, rbx, dst, src); }
  void blsiq(Register dst, Operand src) { bmi1q(0xf3, rbx, dst, src); }
  void blsil(Register dst, Register src) { bmi1l(0xf3, rbx, dst, src); }
  void blsil(Register dst, Operand src) { bmi1l(0xf3, rbx, dst, src); }
  // BLSMSK: dst = src ^ (src - 1) (mask up to and including the lowest set
  // bit). Opcode extension /2 is encoded by passing rdx (register code 2);
  // the destination travels in VEX.vvvv.
  void blsmskq(Register dst, Register src) { bmi1q(0xf3, rdx, dst, src); }
  void blsmskq(Register dst, Operand src) { bmi1q(0xf3, rdx, dst, src); }
  void blsmskl(Register dst, Register src) { bmi1l(0xf3, rdx, dst, src); }
  void blsmskl(Register dst, Operand src) { bmi1l(0xf3, rdx, dst, src); }
  // BLSR: dst = src & (src - 1) (clear lowest set bit). Opcode extension /1
  // is encoded by passing rcx (register code 1); the destination travels in
  // VEX.vvvv.
  void blsrq(Register dst, Register src) { bmi1q(0xf3, rcx, dst, src); }
  void blsrq(Register dst, Operand src) { bmi1q(0xf3, rcx, dst, src); }
  void blsrl(Register dst, Register src) { bmi1l(0xf3, rcx, dst, src); }
  void blsrl(Register dst, Operand src) { bmi1l(0xf3, rcx, dst, src); }
1639 : void tzcntq(Register dst, Register src);
1640 : void tzcntq(Register dst, Operand src);
1641 : void tzcntl(Register dst, Register src);
1642 : void tzcntl(Register dst, Operand src);
1643 :
1644 : void lzcntq(Register dst, Register src);
1645 : void lzcntq(Register dst, Operand src);
1646 : void lzcntl(Register dst, Register src);
1647 : void lzcntl(Register dst, Operand src);
1648 :
1649 : void popcntq(Register dst, Register src);
1650 : void popcntq(Register dst, Operand src);
1651 : void popcntl(Register dst, Register src);
1652 : void popcntl(Register dst, Operand src);
1653 :
  // BZHI: dst = src1 with all bits at positions >= src2[7:0] zeroed. The
  // index operand src2 is encoded in VEX.vvvv and src1 in r/m — hence the
  // swapped argument order in the bmi2 call.
  void bzhiq(Register dst, Register src1, Register src2) {
    bmi2q(kNone, 0xf5, dst, src2, src1);
  }
  void bzhiq(Register dst, Operand src1, Register src2) {
    bmi2q(kNone, 0xf5, dst, src2, src1);
  }
  void bzhil(Register dst, Register src1, Register src2) {
    bmi2l(kNone, 0xf5, dst, src2, src1);
  }
  void bzhil(Register dst, Operand src1, Register src2) {
    bmi2l(kNone, 0xf5, dst, src2, src1);
  }
  // MULX: flagless unsigned multiply of the implicit rdx (edx for the l
  // forms) by src; high half -> dst1, low half -> dst2. dst2 is encoded in
  // VEX.vvvv (F2-prefixed encoding, opcode 0xf6).
  void mulxq(Register dst1, Register dst2, Register src) {
    bmi2q(kF2, 0xf6, dst1, dst2, src);
  }
  void mulxq(Register dst1, Register dst2, Operand src) {
    bmi2q(kF2, 0xf6, dst1, dst2, src);
  }
  void mulxl(Register dst1, Register dst2, Register src) {
    bmi2l(kF2, 0xf6, dst1, dst2, src);
  }
  void mulxl(Register dst1, Register dst2, Operand src) {
    bmi2l(kF2, 0xf6, dst1, dst2, src);
  }
  // PDEP: scatter (deposit) the low-order bits of src1 into the bit
  // positions selected by the mask src2. src1 is encoded in VEX.vvvv
  // (F2-prefixed encoding, opcode 0xf5).
  void pdepq(Register dst, Register src1, Register src2) {
    bmi2q(kF2, 0xf5, dst, src1, src2);
  }
  void pdepq(Register dst, Register src1, Operand src2) {
    bmi2q(kF2, 0xf5, dst, src1, src2);
  }
  void pdepl(Register dst, Register src1, Register src2) {
    bmi2l(kF2, 0xf5, dst, src1, src2);
  }
  void pdepl(Register dst, Register src1, Operand src2) {
    bmi2l(kF2, 0xf5, dst, src1, src2);
  }
  // PEXT: gather (extract) the bits of src1 selected by the mask src2 into
  // the low-order bits of dst. src1 is encoded in VEX.vvvv (F3-prefixed
  // encoding, opcode 0xf5).
  void pextq(Register dst, Register src1, Register src2) {
    bmi2q(kF3, 0xf5, dst, src1, src2);
  }
  void pextq(Register dst, Register src1, Operand src2) {
    bmi2q(kF3, 0xf5, dst, src1, src2);
  }
  void pextl(Register dst, Register src1, Register src2) {
    bmi2l(kF3, 0xf5, dst, src1, src2);
  }
  void pextl(Register dst, Register src1, Operand src2) {
    bmi2l(kF3, 0xf5, dst, src1, src2);
  }
  // SARX: flagless arithmetic right shift of src1 by src2 (count taken
  // modulo the operand width). The count src2 is encoded in VEX.vvvv and
  // src1 in r/m — hence the swapped argument order (F3-prefixed, opcode
  // 0xf7).
  void sarxq(Register dst, Register src1, Register src2) {
    bmi2q(kF3, 0xf7, dst, src2, src1);
  }
  void sarxq(Register dst, Operand src1, Register src2) {
    bmi2q(kF3, 0xf7, dst, src2, src1);
  }
  void sarxl(Register dst, Register src1, Register src2) {
    bmi2l(kF3, 0xf7, dst, src2, src1);
  }
  void sarxl(Register dst, Operand src1, Register src2) {
    bmi2l(kF3, 0xf7, dst, src2, src1);
  }
  // SHLX: flagless logical left shift of src1 by src2. Same vvvv/r-m operand
  // swap as sarx (66-prefixed, opcode 0xf7).
  void shlxq(Register dst, Register src1, Register src2) {
    bmi2q(k66, 0xf7, dst, src2, src1);
  }
  void shlxq(Register dst, Operand src1, Register src2) {
    bmi2q(k66, 0xf7, dst, src2, src1);
  }
  void shlxl(Register dst, Register src1, Register src2) {
    bmi2l(k66, 0xf7, dst, src2, src1);
  }
  void shlxl(Register dst, Operand src1, Register src2) {
    bmi2l(k66, 0xf7, dst, src2, src1);
  }
  // SHRX: flagless logical right shift of src1 by src2. Same vvvv/r-m
  // operand swap as sarx (F2-prefixed, opcode 0xf7).
  void shrxq(Register dst, Register src1, Register src2) {
    bmi2q(kF2, 0xf7, dst, src2, src1);
  }
  void shrxq(Register dst, Operand src1, Register src2) {
    bmi2q(kF2, 0xf7, dst, src2, src1);
  }
  void shrxl(Register dst, Register src1, Register src2) {
    bmi2l(kF2, 0xf7, dst, src2, src1);
  }
  void shrxl(Register dst, Operand src1, Register src2) {
    bmi2l(kF2, 0xf7, dst, src2, src1);
  }
1738 : void rorxq(Register dst, Register src, byte imm8);
1739 : void rorxq(Register dst, Operand src, byte imm8);
1740 : void rorxl(Register dst, Register src, byte imm8);
1741 : void rorxl(Register dst, Operand src, byte imm8);
1742 :
1743 : void lfence();
1744 : void pause();
1745 :
1746 : // Check the code size generated from label to here.
1747 : int SizeOfCodeGeneratedSince(Label* label) {
1748 : return pc_offset() - label->pos();
1749 : }
1750 :
1751 : // Record a deoptimization reason that can be used by a log or cpu profiler.
1752 : // Use --trace-deopt to enable.
1753 : void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
1754 : int id);
1755 :
1756 :
  // Writes raw data (a byte, doubleword, quadword, or pointer-sized value)
  // into the code stream. Used for inline tables, e.g., jump-tables.
1759 : void db(uint8_t data);
1760 : void dd(uint32_t data);
1761 : void dq(uint64_t data);
  // Pointer-sized datum: pointers are 64 bits on x64, so forward to dq().
  void dp(uintptr_t data) { dq(data); }
1763 : void dq(Label* label);
1764 :
1765 : // Patch entries for partial constant pool.
1766 : void PatchConstPool();
1767 :
1768 : // Check if use partial constant pool for this rmode.
1769 : static bool UseConstPoolFor(RelocInfo::Mode rmode);
1770 :
1771 : // Check if there is less than kGap bytes available in the buffer.
1772 : // If this is the case, we need to grow the buffer before emitting
1773 : // an instruction or relocation information.
  inline bool buffer_overflow() const {
    // pc_ advances upward through the buffer while reloc_info_writer's
    // position marks the downward-growing relocation data; the two must stay
    // at least kGap bytes apart.
    return pc_ >= reloc_info_writer.pos() - kGap;
  }
1777 :
1778 : // Get the number of bytes available in the buffer.
  inline int available_space() const {
    // Free bytes between the current emission point and the start of the
    // relocation info.
    return static_cast<int>(reloc_info_writer.pos() - pc_);
  }
1782 :
1783 : static bool IsNop(Address addr);
1784 :
1785 : // Avoid overflows for displacements etc.
1786 : static constexpr int kMaximalBufferSize = 512 * MB;
1787 :
  // Read/overwrite a single byte of already-emitted code at buffer offset
  // |pos|.
  byte byte_at(int pos) { return buffer_start_[pos]; }
  void set_byte_at(int pos, byte value) { buffer_start_[pos] = value; }
1790 :
1791 : #if defined(V8_OS_WIN_X64)
1792 : win64_unwindinfo::BuiltinUnwindInfo GetUnwindInfo() const;
1793 : #endif
1794 :
1795 : protected:
1796 : // Call near indirect
1797 : void call(Operand operand);
1798 :
1799 : private:
  // Absolute address of buffer offset |pos|.
  Address addr_at(int pos) {
    return reinterpret_cast<Address>(buffer_start_ + pos);
  }
  // Read a 32-bit value at |pos|. Instruction operands carry no alignment
  // guarantee, hence the unaligned accessors.
  uint32_t long_at(int pos) {
    return ReadUnalignedValue<uint32_t>(addr_at(pos));
  }
  // Overwrite the 32-bit value at |pos|.
  void long_at_put(int pos, uint32_t x) {
    WriteUnalignedValue(addr_at(pos), x);
  }
1809 :
1810 : // code emission
1811 : void GrowBuffer();
1812 :
  // Append one byte of machine code at the current emission position.
  void emit(byte x) { *pc_++ = x; }
1814 : inline void emitl(uint32_t x);
1815 : inline void emitq(uint64_t x);
1816 : inline void emitw(uint16_t x);
1817 : inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
1818 : inline void emit(Immediate x);
1819 : inline void emit(Immediate64 x);
1820 :
1821 : // Emits a REX prefix that encodes a 64-bit operand size and
1822 : // the top bit of both register codes.
1823 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1824 : // REX.W is set.
1825 : inline void emit_rex_64(XMMRegister reg, Register rm_reg);
1826 : inline void emit_rex_64(Register reg, XMMRegister rm_reg);
1827 : inline void emit_rex_64(Register reg, Register rm_reg);
1828 : inline void emit_rex_64(XMMRegister reg, XMMRegister rm_reg);
1829 :
1830 : // Emits a REX prefix that encodes a 64-bit operand size and
1831 : // the top bit of the destination, index, and base register codes.
1832 : // The high bit of reg is used for REX.R, the high bit of op's base
1833 : // register is used for REX.B, and the high bit of op's index register
1834 : // is used for REX.X. REX.W is set.
1835 : inline void emit_rex_64(Register reg, Operand op);
1836 : inline void emit_rex_64(XMMRegister reg, Operand op);
1837 :
1838 : // Emits a REX prefix that encodes a 64-bit operand size and
1839 : // the top bit of the register code.
1840 : // The high bit of register is used for REX.B.
1841 : // REX.W is set and REX.R and REX.X are clear.
1842 : inline void emit_rex_64(Register rm_reg);
1843 :
1844 : // Emits a REX prefix that encodes a 64-bit operand size and
1845 : // the top bit of the index and base register codes.
1846 : // The high bit of op's base register is used for REX.B, and the high
1847 : // bit of op's index register is used for REX.X.
1848 : // REX.W is set and REX.R clear.
1849 : inline void emit_rex_64(Operand op);
1850 :
1851 : // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
  void emit_rex_64() { emit(0x48); }  // 0x48 = REX prefix with only W set.
1853 :
1854 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1855 : // REX.W is clear.
1856 : inline void emit_rex_32(Register reg, Register rm_reg);
1857 :
1858 : // The high bit of reg is used for REX.R, the high bit of op's base
1859 : // register is used for REX.B, and the high bit of op's index register
1860 : // is used for REX.X. REX.W is cleared.
1861 : inline void emit_rex_32(Register reg, Operand op);
1862 :
1863 : // High bit of rm_reg goes to REX.B.
1864 : // REX.W, REX.R and REX.X are clear.
1865 : inline void emit_rex_32(Register rm_reg);
1866 :
1867 : // High bit of base goes to REX.B and high bit of index to REX.X.
1868 : // REX.W and REX.R are clear.
1869 : inline void emit_rex_32(Operand op);
1870 :
1871 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1872 : // REX.W is cleared. If no REX bits are set, no byte is emitted.
1873 : inline void emit_optional_rex_32(Register reg, Register rm_reg);
1874 :
1875 : // The high bit of reg is used for REX.R, the high bit of op's base
1876 : // register is used for REX.B, and the high bit of op's index register
1877 : // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
1878 : // is emitted.
1879 : inline void emit_optional_rex_32(Register reg, Operand op);
1880 :
1881 : // As for emit_optional_rex_32(Register, Register), except that
1882 : // the registers are XMM registers.
1883 : inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
1884 :
1885 : // As for emit_optional_rex_32(Register, Register), except that
1886 : // one of the registers is an XMM registers.
1887 : inline void emit_optional_rex_32(XMMRegister reg, Register base);
1888 :
1889 : // As for emit_optional_rex_32(Register, Register), except that
1890 : // one of the registers is an XMM registers.
1891 : inline void emit_optional_rex_32(Register reg, XMMRegister base);
1892 :
1893 : // As for emit_optional_rex_32(Register, Operand), except that
1894 : // the register is an XMM register.
1895 : inline void emit_optional_rex_32(XMMRegister reg, Operand op);
1896 :
1897 : // Optionally do as emit_rex_32(Register) if the register number has
1898 : // the high bit set.
1899 : inline void emit_optional_rex_32(Register rm_reg);
1900 : inline void emit_optional_rex_32(XMMRegister rm_reg);
1901 :
1902 : // Optionally do as emit_rex_32(Operand) if the operand register
1903 : // numbers have a high bit set.
1904 : inline void emit_optional_rex_32(Operand op);
1905 :
  // Zero-operand form: a 64-bit operand size requires a bare REX.W prefix;
  // 32-bit is the default operand size and needs no prefix at all.
  void emit_rex(int size) {
    if (size == kInt64Size) {
      emit_rex_64();
    } else {
      DCHECK_EQ(size, kInt32Size);
    }
  }

  // One-operand form: 64-bit size always emits a REX prefix (W plus any
  // extension bits); 32-bit size emits a REX byte only when the operand's
  // register numbers need the extension bits.
  template<class P1>
  void emit_rex(P1 p1, int size) {
    if (size == kInt64Size) {
      emit_rex_64(p1);
    } else {
      DCHECK_EQ(size, kInt32Size);
      emit_optional_rex_32(p1);
    }
  }

  // Two-operand form; same size-based dispatch as the one-operand form.
  template<class P1, class P2>
  void emit_rex(P1 p1, P2 p2, int size) {
    if (size == kInt64Size) {
      emit_rex_64(p1, p2);
    } else {
      DCHECK_EQ(size, kInt32Size);
      emit_optional_rex_32(p1, p2);
    }
  }
1933 :
1934 : // Emit vex prefix
  // 0xc5 introduces the two-byte VEX prefix form.
  void emit_vex2_byte0() { emit(0xc5); }
  inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
                              SIMDPrefix pp);
  // 0xc4 introduces the three-byte VEX prefix form.
  void emit_vex3_byte0() { emit(0xc4); }
  inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
  inline void emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m);
  inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
                              SIMDPrefix pp);
1943 : inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
1944 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1945 : VexW w);
1946 : inline void emit_vex_prefix(Register reg, Register v, Register rm,
1947 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1948 : VexW w);
1949 : inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, Operand rm,
1950 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1951 : VexW w);
1952 : inline void emit_vex_prefix(Register reg, Register v, Operand rm,
1953 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1954 : VexW w);
1955 :
1956 : // Emit the ModR/M byte, and optionally the SIB byte and
1957 : // 1- or 4-byte offset for a memory operand. Also encodes
1958 : // the second operand of the operation, a register or operation
1959 : // subcode, into the reg field of the ModR/M byte.
  void emit_operand(Register reg, Operand adr) {
    // Only the low three bits of the register number fit in ModRM.reg; the
    // high (fourth) bit travels in a REX/VEX prefix emitted earlier.
    emit_operand(reg.low_bits(), adr);
  }
1963 :
1964 : // Emit the ModR/M byte, and optionally the SIB byte and
1965 : // 1- or 4-byte offset for a memory operand. Also used to encode
1966 : // a three-bit opcode extension into the ModR/M byte.
1967 : void emit_operand(int rm, Operand adr);
1968 :
1969 : // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
  void emit_modrm(Register reg, Register rm_reg) {
    // mod=11 (register-direct), reg in bits 5..3, rm in bits 2..0.
    emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
  }

  // Emit a ModR/M byte with an operation subcode in the reg field and
  // a register in the rm_reg field.
  void emit_modrm(int code, Register rm_reg) {
    DCHECK(is_uint3(code));
    emit(0xC0 | code << 3 | rm_reg.low_bits());
  }
1980 :
1981 : // Emit the code-object-relative offset of the label's position
1982 : inline void emit_code_relative_offset(Label* label);
1983 :
1984 : // The first argument is the reg field, the second argument is the r/m field.
1985 : void emit_sse_operand(XMMRegister dst, XMMRegister src);
1986 : void emit_sse_operand(XMMRegister reg, Operand adr);
1987 : void emit_sse_operand(Register reg, Operand adr);
1988 : void emit_sse_operand(XMMRegister dst, Register src);
1989 : void emit_sse_operand(Register dst, XMMRegister src);
1990 : void emit_sse_operand(XMMRegister dst);
1991 :
1992 : // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
1993 : // AND, OR, XOR, or CMP. The encodings of these operations are all
1994 : // similar, differing just in the opcode or in the reg field of the
1995 : // ModR/M byte.
1996 : void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
1997 : void arithmetic_op_8(byte opcode, Register reg, Operand rm_reg);
1998 : void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
1999 : void arithmetic_op_16(byte opcode, Register reg, Operand rm_reg);
2000 : // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
2001 : void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
2002 : void arithmetic_op(byte opcode, Register reg, Operand rm_reg, int size);
2003 : // Operate on a byte in memory or register.
2004 : void immediate_arithmetic_op_8(byte subcode,
2005 : Register dst,
2006 : Immediate src);
2007 : void immediate_arithmetic_op_8(byte subcode, Operand dst, Immediate src);
2008 : // Operate on a word in memory or register.
2009 : void immediate_arithmetic_op_16(byte subcode,
2010 : Register dst,
2011 : Immediate src);
2012 : void immediate_arithmetic_op_16(byte subcode, Operand dst, Immediate src);
2013 : // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
2014 : void immediate_arithmetic_op(byte subcode,
2015 : Register dst,
2016 : Immediate src,
2017 : int size);
2018 : void immediate_arithmetic_op(byte subcode, Operand dst, Immediate src,
2019 : int size);
2020 :
2021 : // Emit machine code for a shift operation.
2022 : void shift(Operand dst, Immediate shift_amount, int subcode, int size);
2023 : void shift(Register dst, Immediate shift_amount, int subcode, int size);
2024 : // Shift dst by cl % 64 bits.
2025 : void shift(Register dst, int subcode, int size);
2026 : void shift(Operand dst, int subcode, int size);
2027 :
2028 : void emit_farith(int b1, int b2, int i);
2029 :
2030 : // labels
2031 : // void print(Label* L);
2032 : void bind_to(Label* L, int pos);
2033 :
2034 : // record reloc info for current pc_
2035 : void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
2036 :
2037 : // Arithmetics
  // ADD: opcode 0x03 = ADD r, r/m; 0x01 = ADD r/m, r (note the swapped
  // reg/operand arguments for the memory-destination form); the immediate
  // forms use the group-1 opcode with ModRM.reg subcode 0x0.
  void emit_add(Register dst, Register src, int size) {
    arithmetic_op(0x03, dst, src, size);
  }

  void emit_add(Register dst, Immediate src, int size) {
    immediate_arithmetic_op(0x0, dst, src, size);
  }

  void emit_add(Register dst, Operand src, int size) {
    arithmetic_op(0x03, dst, src, size);
  }

  void emit_add(Operand dst, Register src, int size) {
    arithmetic_op(0x1, src, dst, size);
  }

  void emit_add(Operand dst, Immediate src, int size) {
    immediate_arithmetic_op(0x0, dst, src, size);
  }
2057 :
  // AND: opcode 0x23 = AND r, r/m; 0x21 = AND r/m, r; immediate forms use
  // group-1 subcode 0x4.
  void emit_and(Register dst, Register src, int size) {
    arithmetic_op(0x23, dst, src, size);
  }

  void emit_and(Register dst, Operand src, int size) {
    arithmetic_op(0x23, dst, src, size);
  }

  void emit_and(Operand dst, Register src, int size) {
    arithmetic_op(0x21, src, dst, size);
  }

  void emit_and(Register dst, Immediate src, int size) {
    immediate_arithmetic_op(0x4, dst, src, size);
  }

  void emit_and(Operand dst, Immediate src, int size) {
    immediate_arithmetic_op(0x4, dst, src, size);
  }
2077 :
  // CMP: opcode 0x3B = CMP r, r/m; 0x39 = CMP r/m, r; immediate forms use
  // group-1 subcode 0x7.
  void emit_cmp(Register dst, Register src, int size) {
    arithmetic_op(0x3B, dst, src, size);
  }

  void emit_cmp(Register dst, Operand src, int size) {
    arithmetic_op(0x3B, dst, src, size);
  }

  void emit_cmp(Operand dst, Register src, int size) {
    arithmetic_op(0x39, src, dst, size);
  }

  void emit_cmp(Register dst, Immediate src, int size) {
    immediate_arithmetic_op(0x7, dst, src, size);
  }

  void emit_cmp(Operand dst, Immediate src, int size) {
    immediate_arithmetic_op(0x7, dst, src, size);
  }
2097 :
  // Compare {al,ax,eax,rax} with dst. If equal, set ZF and write src into
  // dst. Otherwise clear ZF and write dst into {al,ax,eax,rax}. This
  // operation is only atomic if prefixed by the lock instruction.
2101 : void emit_cmpxchg(Operand dst, Register src, int size);
2102 :
2103 : void emit_dec(Register dst, int size);
2104 : void emit_dec(Operand dst, int size);
2105 :
2106 : // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
2107 : // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
2108 : // when size is 32.
2109 : void emit_idiv(Register src, int size);
2110 : void emit_div(Register src, int size);
2111 :
2112 : // Signed multiply instructions.
2113 : // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
2114 : void emit_imul(Register src, int size);
2115 : void emit_imul(Operand src, int size);
2116 : void emit_imul(Register dst, Register src, int size);
2117 : void emit_imul(Register dst, Operand src, int size);
2118 : void emit_imul(Register dst, Register src, Immediate imm, int size);
2119 : void emit_imul(Register dst, Operand src, Immediate imm, int size);
2120 :
2121 : void emit_inc(Register dst, int size);
2122 : void emit_inc(Operand dst, int size);
2123 :
2124 : void emit_lea(Register dst, Operand src, int size);
2125 :
2126 : void emit_mov(Register dst, Operand src, int size);
2127 : void emit_mov(Register dst, Register src, int size);
2128 : void emit_mov(Operand dst, Register src, int size);
2129 : void emit_mov(Register dst, Immediate value, int size);
2130 : void emit_mov(Operand dst, Immediate value, int size);
2131 : void emit_mov(Register dst, Immediate64 value, int size);
2132 :
2133 : void emit_movzxb(Register dst, Operand src, int size);
2134 : void emit_movzxb(Register dst, Register src, int size);
2135 : void emit_movzxw(Register dst, Operand src, int size);
2136 : void emit_movzxw(Register dst, Register src, int size);
2137 :
2138 : void emit_neg(Register dst, int size);
2139 : void emit_neg(Operand dst, int size);
2140 :
2141 : void emit_not(Register dst, int size);
2142 : void emit_not(Operand dst, int size);
2143 :
  // OR: opcode 0x0B = OR r, r/m; 0x09 = OR r/m, r; immediate forms use
  // group-1 subcode 0x1.
  void emit_or(Register dst, Register src, int size) {
    arithmetic_op(0x0B, dst, src, size);
  }

  void emit_or(Register dst, Operand src, int size) {
    arithmetic_op(0x0B, dst, src, size);
  }

  void emit_or(Operand dst, Register src, int size) {
    arithmetic_op(0x9, src, dst, size);
  }

  void emit_or(Register dst, Immediate src, int size) {
    immediate_arithmetic_op(0x1, dst, src, size);
  }

  void emit_or(Operand dst, Immediate src, int size) {
    immediate_arithmetic_op(0x1, dst, src, size);
  }
2163 :
2164 : void emit_repmovs(int size);
2165 :
  // SBB (subtract with borrow): opcode 0x1B = SBB r, r/m.
  void emit_sbb(Register dst, Register src, int size) {
    arithmetic_op(0x1b, dst, src, size);
  }
2169 :
  // SUB: opcode 0x2B = SUB r, r/m; 0x29 = SUB r/m, r; immediate forms use
  // group-1 subcode 0x5.
  void emit_sub(Register dst, Register src, int size) {
    arithmetic_op(0x2B, dst, src, size);
  }

  void emit_sub(Register dst, Immediate src, int size) {
    immediate_arithmetic_op(0x5, dst, src, size);
  }

  void emit_sub(Register dst, Operand src, int size) {
    arithmetic_op(0x2B, dst, src, size);
  }

  void emit_sub(Operand dst, Register src, int size) {
    arithmetic_op(0x29, src, dst, size);
  }

  void emit_sub(Operand dst, Immediate src, int size) {
    immediate_arithmetic_op(0x5, dst, src, size);
  }
2189 :
2190 : void emit_test(Register dst, Register src, int size);
2191 : void emit_test(Register reg, Immediate mask, int size);
2192 : void emit_test(Operand op, Register reg, int size);
2193 : void emit_test(Operand op, Immediate mask, int size);
  // TEST is symmetric in its operands, so the (reg, mem) form reuses the
  // (mem, reg) encoding.
  void emit_test(Register reg, Operand op, int size) {
    return emit_test(op, reg, size);
  }
2197 :
2198 : void emit_xchg(Register dst, Register src, int size);
2199 : void emit_xchg(Register dst, Operand src, int size);
2200 :
  // XOR: opcode 0x33 = XOR r, r/m; 0x31 = XOR r/m, r; immediate forms use
  // group-1 subcode 0x6.
  void emit_xor(Register dst, Register src, int size) {
    // Self-xor is the canonical zeroing idiom; shrink it to the 32-bit
    // encoding, which saves the REX.W byte.
    if (size == kInt64Size && dst.code() == src.code()) {
      // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
      // there is no need to make this a 64 bit operation.
      arithmetic_op(0x33, dst, src, kInt32Size);
    } else {
      arithmetic_op(0x33, dst, src, size);
    }
  }

  void emit_xor(Register dst, Operand src, int size) {
    arithmetic_op(0x33, dst, src, size);
  }

  void emit_xor(Register dst, Immediate src, int size) {
    immediate_arithmetic_op(0x6, dst, src, size);
  }

  void emit_xor(Operand dst, Immediate src, int size) {
    immediate_arithmetic_op(0x6, dst, src, size);
  }

  void emit_xor(Operand dst, Register src, int size) {
    arithmetic_op(0x31, src, dst, size);
  }
2226 :
2227 : // Most BMI instructions are similar.
2228 : void bmi1q(byte op, Register reg, Register vreg, Register rm);
2229 : void bmi1q(byte op, Register reg, Register vreg, Operand rm);
2230 : void bmi1l(byte op, Register reg, Register vreg, Register rm);
2231 : void bmi1l(byte op, Register reg, Register vreg, Operand rm);
2232 : void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
2233 : void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
2234 : void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
2235 : void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
2236 :
2237 : // record the position of jmp/jcc instruction
2238 : void record_farjmp_position(Label* L, int pos);
2239 :
2240 : bool is_optimizable_farjmp(int idx);
2241 :
2242 : void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
2243 :
2244 : int WriteCodeComments();
2245 :
2246 : friend class EnsureSpace;
2247 : friend class RegExpMacroAssemblerX64;
2248 :
2249 : // code generation
2250 : RelocInfoWriter reloc_info_writer;
2251 :
2252 : // Internal reference positions, required for (potential) patching in
2253 : // GrowBuffer(); contains only those internal references whose labels
2254 : // are already bound.
2255 : std::deque<int> internal_reference_positions_;
2256 :
2257 : // Variables for this instance of assembler
2258 : int farjmp_num_ = 0;
2259 : std::deque<int> farjmp_positions_;
2260 : std::map<Label*, std::vector<int>> label_farjmp_maps_;
2261 :
2262 : ConstPool constpool_;
2263 :
2264 : friend class ConstPool;
2265 :
2266 : #if defined(V8_OS_WIN_X64)
2267 : std::unique_ptr<win64_unwindinfo::XdataEncoder> xdata_encoder_;
2268 : #endif
2269 : };
2270 :
2271 :
2272 : // Helper class that ensures that there is enough space for generating
2273 : // instructions and relocation information. The constructor makes
2274 : // sure that there is enough space and (in debug mode) the destructor
2275 : // checks that we did not generate too much.
class EnsureSpace {
 public:
  // Grow the assembler's buffer up front when fewer than kGap free bytes
  // remain, so the instruction emitted under this guard cannot overflow.
  explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
    if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
#ifdef DEBUG
    space_before_ = assembler_->available_space();
#endif
  }

#ifdef DEBUG
  // Debug check: a single guarded emission must stay under kGap bytes,
  // otherwise the one up-front space check above would be insufficient.
  ~EnsureSpace() {
    int bytes_generated = space_before_ - assembler_->available_space();
    DCHECK(bytes_generated < assembler_->kGap);
  }
#endif

 private:
  Assembler* assembler_;
#ifdef DEBUG
  int space_before_;  // Free bytes observed at construction (debug only).
#endif
};
2298 :
2299 : } // namespace internal
2300 : } // namespace v8
2301 :
2302 : #endif // V8_X64_ASSEMBLER_X64_H_
|