Line data Source code
1 : // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 : // All Rights Reserved.
3 : //
4 : // Redistribution and use in source and binary forms, with or without
5 : // modification, are permitted provided that the following conditions are
6 : // met:
7 : //
8 : // - Redistributions of source code must retain the above copyright notice,
9 : // this list of conditions and the following disclaimer.
10 : //
11 : // - Redistribution in binary form must reproduce the above copyright
12 : // notice, this list of conditions and the following disclaimer in the
13 : // documentation and/or other materials provided with the distribution.
14 : //
15 : // - Neither the name of Sun Microsystems or the names of contributors may
16 : // be used to endorse or promote products derived from this software without
17 : // specific prior written permission.
18 : //
19 : // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 : // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 : // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 : // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 : // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 : // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 : // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 : // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 : // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 : // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 : // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 :
31 : // The original source code covered by the above license has been
32 : // modified significantly by Google Inc.
33 : // Copyright 2012 the V8 project authors. All rights reserved.
34 :
35 : // A lightweight X64 Assembler.
36 :
37 : #ifndef V8_X64_ASSEMBLER_X64_H_
38 : #define V8_X64_ASSEMBLER_X64_H_
39 :
40 : #include <deque>
41 : #include <map>
42 : #include <vector>
43 :
44 : #include "src/assembler.h"
45 : #include "src/label.h"
46 : #include "src/objects/smi.h"
47 : #include "src/x64/constants-x64.h"
48 : #include "src/x64/register-x64.h"
49 : #include "src/x64/sse-instr.h"
50 :
51 : namespace v8 {
52 : namespace internal {
53 :
54 : class SafepointTableBuilder;
55 :
56 : // Utility functions
57 :
58 : enum Condition {
59 : // any value < 0 is considered no_condition
60 : no_condition = -1,
61 :
62 : overflow = 0,
63 : no_overflow = 1,
64 : below = 2,
65 : above_equal = 3,
66 : equal = 4,
67 : not_equal = 5,
68 : below_equal = 6,
69 : above = 7,
70 : negative = 8,
71 : positive = 9,
72 : parity_even = 10,
73 : parity_odd = 11,
74 : less = 12,
75 : greater_equal = 13,
76 : less_equal = 14,
77 : greater = 15,
78 :
79 : // Fake conditions that are handled by the
80 : // opcodes using them.
81 : always = 16,
82 : never = 17,
83 : // aliases
84 : carry = below,
85 : not_carry = above_equal,
86 : zero = equal,
87 : not_zero = not_equal,
88 : sign = negative,
89 : not_sign = positive,
90 : last_condition = greater
91 : };
92 :
93 :
94 : // Returns the equivalent of !cc.
95 : // Negation of the default no_condition (-1) results in a non-default
96 : // no_condition value (-2). As long as tests for no_condition check
97 : // for condition < 0, this will work as expected.
98 0 : inline Condition NegateCondition(Condition cc) {
99 456706 : return static_cast<Condition>(cc ^ 1);
100 : }
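// For example, since the x64 condition codes above come in complementary
// even/odd pairs, XOR-ing the low bit flips a condition into its negation:
//   NegateCondition(equal) == not_equal      // 4 ^ 1 == 5
//   NegateCondition(below) == above_equal    // 2 ^ 1 == 3 (carry <-> not_carry)
//   NegateCondition(less)  == greater_equal  // 12 ^ 1 == 13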
101 :
102 :
103 : enum RoundingMode {
104 : kRoundToNearest = 0x0,
105 : kRoundDown = 0x1,
106 : kRoundUp = 0x2,
107 : kRoundToZero = 0x3
108 : };
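// These values form the low two bits of the rounding-control immediate used by
// the SSE4.1 round instructions; see vroundss/vroundsd further down, which
// additionally set bit 3 (0x8) to mask the precision exception.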
109 :
110 :
111 : // -----------------------------------------------------------------------------
112 : // Machine instruction Immediates
113 :
114 : class Immediate {
115 : public:
116 9175623 : explicit constexpr Immediate(int32_t value) : value_(value) {}
117 : explicit constexpr Immediate(int32_t value, RelocInfo::Mode rmode)
118 : : value_(value), rmode_(rmode) {}
119 : explicit Immediate(Smi value)
120 : : value_(static_cast<int32_t>(static_cast<intptr_t>(value.ptr()))) {
121 : DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
122 : }
123 :
124 : private:
125 : const int32_t value_;
126 : const RelocInfo::Mode rmode_ = RelocInfo::NONE;
127 :
128 : friend class Assembler;
129 : };
130 : ASSERT_TRIVIALLY_COPYABLE(Immediate);
131 : static_assert(sizeof(Immediate) <= kSystemPointerSize,
132 : "Immediate must be small enough to pass it by value");
133 :
134 : class Immediate64 {
135 : public:
136 : explicit constexpr Immediate64(int64_t value) : value_(value) {}
137 : explicit constexpr Immediate64(int64_t value, RelocInfo::Mode rmode)
138 : : value_(value), rmode_(rmode) {}
139 : explicit constexpr Immediate64(Address value, RelocInfo::Mode rmode)
140 47969621 : : value_(static_cast<int64_t>(value)), rmode_(rmode) {}
141 :
142 : private:
143 : const int64_t value_;
144 : const RelocInfo::Mode rmode_ = RelocInfo::NONE;
145 :
146 : friend class Assembler;
147 : };
148 :
149 : // -----------------------------------------------------------------------------
150 : // Machine instruction Operands
151 :
152 : enum ScaleFactor : int8_t {
153 : times_1 = 0,
154 : times_2 = 1,
155 : times_4 = 2,
156 : times_8 = 3,
157 : times_int_size = times_4,
158 : times_system_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
159 : times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4,
160 : };
161 :
162 : class V8_EXPORT_PRIVATE Operand {
163 : public:
164 59013796 : struct Data {
165 : byte rex = 0;
166 : byte buf[9];
167 : byte len = 1; // number of bytes of buf in use.
168 : int8_t addend; // for rip + offset + addend.
169 : };
170 :
171 : // [base + disp/r]
172 : Operand(Register base, int32_t disp);
173 :
174 : // [base + index*scale + disp/r]
175 : Operand(Register base,
176 : Register index,
177 : ScaleFactor scale,
178 : int32_t disp);
179 :
180 : // [index*scale + disp/r]
181 : Operand(Register index,
182 : ScaleFactor scale,
183 : int32_t disp);
184 :
185 : // Offset from existing memory operand.
186 : // The offset is added to the existing displacement as a 32-bit signed value;
187 : // the addition must not overflow.
188 : Operand(Operand base, int32_t offset);
189 :
190 : // [rip + disp/r]
191 : explicit Operand(Label* label, int addend = 0);
192 :
193 : Operand(const Operand&) V8_NOEXCEPT = default;
194 :
195 : // Checks whether either base or index register is the given register.
196 : // Does not check the "reg" part of the Operand.
197 : bool AddressUsesRegister(Register reg) const;
198 :
199 : // Queries related to the size of the generated instruction.
200 : // Whether the generated instruction will have a REX prefix.
201 : bool requires_rex() const { return data_.rex != 0; }
202 : // Size of the ModR/M, SIB and displacement parts of the generated
203 : // instruction.
204 : int operand_size() const { return data_.len; }
205 :
206 : const Data& data() const { return data_; }
207 :
208 : private:
209 : const Data data_;
210 : };
211 : ASSERT_TRIVIALLY_COPYABLE(Operand);
212 : static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
213 : "Operand must be small enough to pass it by value");
214 :
215 : #define ASSEMBLER_INSTRUCTION_LIST(V) \
216 : V(add) \
217 : V(and) \
218 : V(cmp) \
219 : V(cmpxchg) \
220 : V(dec) \
221 : V(idiv) \
222 : V(div) \
223 : V(imul) \
224 : V(inc) \
225 : V(lea) \
226 : V(mov) \
227 : V(movzxb) \
228 : V(movzxw) \
229 : V(neg) \
230 : V(not) \
231 : V(or) \
232 : V(repmovs) \
233 : V(sbb) \
234 : V(sub) \
235 : V(test) \
236 : V(xchg) \
237 : V(xor)
238 :
239 : // Shift instructions on operands/registers with kInt32Size and kInt64Size.
240 : #define SHIFT_INSTRUCTION_LIST(V) \
241 : V(rol, 0x0) \
242 : V(ror, 0x1) \
243 : V(rcl, 0x2) \
244 : V(rcr, 0x3) \
245 : V(shl, 0x4) \
246 : V(shr, 0x5) \
247 : V(sar, 0x7)
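// The subcode is the ModR/M "reg" extension of the x64 shift-group opcodes
// (shl = /4, sar = /7, etc.); e.g. shll(rax, Immediate(3)) encodes the
// C1 /4 ib form of SHL.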
248 :
249 : // Partial Constant Pool
250 : // Unlike a complete constant pool (as used on ARM), a partial constant pool
251 : // only takes effect for shareable constants, in order to reduce code size.
252 : // A partial constant pool does not emit constant pool entries at the end of each
253 : // code object. Instead, it keeps the first shareable constant inlined in the
254 : // instructions and uses rip-relative memory loads for the same constants in
255 : // subsequent instructions. These rip-relative loads target the position of
256 : // the first inlined constant. For example:
257 : //
258 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
259 : // …
260 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
261 : // …
262 : //
263 : // turns into
264 : //
265 : // REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
266 : // …
267 : // REX.W movq r10,[rip+0xffffff96] ; 7 bytes
268 : // …
269 :
270 0 : class ConstPool {
271 : public:
272 35720261 : explicit ConstPool(Assembler* assm) : assm_(assm) {}
273 : // Returns true when partial constant pool is valid for this entry.
274 : bool TryRecordEntry(intptr_t data, RelocInfo::Mode mode);
275 : bool IsEmpty() const { return entries_.empty(); }
276 :
277 : void PatchEntries();
278 : // Discard any pending pool entries.
279 : void Clear();
280 :
281 : private:
282 : // Adds a shared entry to entries_. Returns true if this is not the first time
283 : // we add this entry, false otherwise.
284 : bool AddSharedEntry(uint64_t data, int offset);
285 :
286 : // Check if the instruction is a rip-relative move.
287 : bool IsMoveRipRelative(Address instr);
288 :
289 : Assembler* assm_;
290 :
291 : // Values, pc offsets of entries.
292 : typedef std::multimap<uint64_t, int> EntryMap;
293 : EntryMap entries_;
294 :
295 : // Number of bytes taken up by the displacement of rip-relative addressing.
296 : static constexpr int kRipRelativeDispSize = 4; // 32-bit displacement.
297 : // Distance between the address of the displacement in the rip-relative move
298 : // instruction and the head address of the instruction.
299 : static constexpr int kMoveRipRelativeDispOffset =
300 : 3; // REX Opcode ModRM Displacement
301 : // Distance between the address of the imm64 in the 'movq reg, imm64'
302 : // instruction and the head address of the instruction.
303 : static constexpr int kMoveImm64Offset = 2; // REX Opcode imm64
304 : // A mask for rip-relative move instruction.
305 : static constexpr uint32_t kMoveRipRelativeMask = 0x00C7FFFB;
306 : // The bits for a rip-relative move instruction after mask.
307 : static constexpr uint32_t kMoveRipRelativeInstr = 0x00058B48;
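// For illustration: a rip-relative load emitted by this pool begins with the
// bytes 48 8b 05 (REX.W, opcode 8B, ModRM with mod=00 and r/m=101, i.e.
// rip-relative), which read as a little-endian 32-bit value is 0x00058B48.
// The mask clears the REX.R bit and the ModRM reg field, so IsMoveRipRelative
// matches this pattern regardless of which destination register was used.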
308 : };
309 :
310 : class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
311 : private:
312 : // We check before assembling an instruction that there is sufficient
313 : // space to write an instruction and its relocation information.
314 : // The relocation writer's position must be kGap bytes above the end of
315 : // the generated instructions. This leaves enough space for the
316 : // longest possible x64 instruction, 15 bytes, and the longest possible
317 : // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
318 : // (There is a 15 byte limit on x64 instruction length that rules out some
319 : // otherwise valid instructions.)
320 : // This allows for a single, fast space check per instruction.
321 : static constexpr int kGap = 32;
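// (The longest instruction plus the longest relocation entry is 15 + 16 = 31
// bytes, which fits within kGap, so one check per instruction suffices.)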
322 :
323 : public:
324 : // Create an assembler. Instructions and relocation information are emitted
325 : // into a buffer, with the instructions starting from the beginning and the
326 : // relocation information starting from the end of the buffer. See CodeDesc
327 : // for a detailed comment on the layout (globals.h).
328 : //
329 : // If the provided buffer is nullptr, the assembler allocates and grows its
330 : // own buffer. Otherwise it takes ownership of the provided buffer.
331 : explicit Assembler(const AssemblerOptions&,
332 : std::unique_ptr<AssemblerBuffer> = {});
333 71440250 : ~Assembler() override = default;
334 :
335 : // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
336 : static constexpr int kNoHandlerTable = 0;
337 : static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
338 : void GetCode(Isolate* isolate, CodeDesc* desc,
339 : SafepointTableBuilder* safepoint_table_builder,
340 : int handler_table_offset);
341 :
342 : // Convenience wrapper for code without safepoint or handler tables.
343 99736 : void GetCode(Isolate* isolate, CodeDesc* desc) {
344 311448 : GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
345 99736 : }
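// A minimal usage sketch of the assembler lifecycle (everything other than the
// Assembler API shown in this header, e.g. the isolate, is an assumption):
//   Assembler masm(AssemblerOptions{});
//   masm.movq(rax, int64_t{42});
//   masm.ret(0);
//   CodeDesc desc;
//   masm.GetCode(isolate, &desc);  // emits pending code and fills desc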
346 :
347 : void FinalizeJumpOptimizationInfo();
348 :
349 : // Unused on this architecture.
350 : void MaybeEmitOutOfLineConstantPool() {}
351 :
352 : // Read/Modify the code target in the relative branch/call instruction at pc.
353 : // On the x64 architecture, we use relative jumps with a 32-bit displacement
354 : // to jump to other Code objects in the Code space in the heap.
355 : // Jumps to C functions are done indirectly through a 64-bit register holding
356 : // the absolute address of the target.
357 : // These functions convert between absolute Addresses of Code objects and
358 : // the relative displacements stored in the code.
359 : // The isolate argument is unused (and may be nullptr) when skipping flushing.
360 : static inline Address target_address_at(Address pc, Address constant_pool);
361 : static inline void set_target_address_at(
362 : Address pc, Address constant_pool, Address target,
363 : ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
364 :
365 : // Return the code target address at a call site from the return address
366 : // of that call in the instruction stream.
367 : static inline Address target_address_from_return_address(Address pc);
368 :
369 : // This sets the branch destination (which is in the instruction on x64).
370 : // This is for calls and branches within generated code.
371 : inline static void deserialization_set_special_target_at(
372 : Address instruction_payload, Code code, Address target);
373 :
374 : // Get the size of the special target encoded at 'instruction_payload'.
375 : inline static int deserialization_special_target_size(
376 : Address instruction_payload);
377 :
378 : // This sets the internal reference at the pc.
379 : inline static void deserialization_set_target_internal_reference_at(
380 : Address pc, Address target,
381 : RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
382 :
383 : inline Handle<Code> code_target_object_handle_at(Address pc);
384 : inline Address runtime_entry_at(Address pc);
385 :
386 : // Number of bytes taken up by the branch target in the code.
387 : static constexpr int kSpecialTargetSize = 4; // 32-bit displacement.
388 : // Distance between the address of the code target in the call instruction
389 : // and the return address pushed on the stack.
390 : static constexpr int kCallTargetAddressOffset = 4; // 32-bit displacement.
391 :
392 : // One byte opcode for test eax,0xXXXXXXXX.
393 : static constexpr byte kTestEaxByte = 0xA9;
394 : // One byte opcode for test al, 0xXX.
395 : static constexpr byte kTestAlByte = 0xA8;
396 : // One byte opcode for nop.
397 : static constexpr byte kNopByte = 0x90;
398 :
399 : // One byte prefix for a short conditional jump.
400 : static constexpr byte kJccShortPrefix = 0x70;
401 : static constexpr byte kJncShortOpcode = kJccShortPrefix | not_carry;
402 : static constexpr byte kJcShortOpcode = kJccShortPrefix | carry;
403 : static constexpr byte kJnzShortOpcode = kJccShortPrefix | not_zero;
404 : static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;
405 :
406 : // VEX prefix encodings.
407 : enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
408 : enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
409 : enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
410 : enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
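// These enums are the raw VEX prefix fields passed to vinstr() below: e.g. the
// scalar-double helper vsd() passes {kF2, k0F, kWIG}, while the AVX forms of
// the SSE2/SSSE3/SSE4 instruction lists forward the prefix and escape columns
// of those lists.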
411 :
412 : // ---------------------------------------------------------------------------
413 : // Code generation
414 : //
415 : // Function names correspond one-to-one to x64 instruction mnemonics.
416 : // Unless specified otherwise, instructions operate on 64-bit operands.
417 : //
418 : // If we need versions of an assembly instruction that operate on different
419 : // width arguments, we add a single-letter suffix specifying the width.
420 : // This is done for the following instructions: mov, cmp, inc, dec,
421 : // add, sub, and test.
422 : // There are no versions of these instructions without the suffix.
423 : // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
424 : // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
425 : // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
426 : // - Instructions on 64-bit (quadword) operands/registers use 'q'.
427 : // - Instructions on operands/registers with pointer size use 'p'.
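// For example (a sketch using an Assembler instance named masm):
//   masm.movb(rax, Immediate(1));     // 8-bit move
//   masm.movw(Operand(rsp, 0), rax);  // 16-bit store
//   masm.movl(rax, rbx);              // 32-bit move
//   masm.movq(rax, rbx);              // 64-bit move
//   masm.addl(rax, Immediate(1));     // 32-bit add
//   masm.addq(rax, Immediate(1));     // 64-bit add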
428 :
429 : #define DECLARE_INSTRUCTION(instruction) \
430 : template <class P1> \
431 : void instruction##_tagged(P1 p1) { \
432 : STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
433 : /* TODO(ishell): change to kTaggedSize */ \
434 : emit_##instruction(p1, COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
435 : } \
436 : \
437 : template <class P1> \
438 : void instruction##l(P1 p1) { \
439 : emit_##instruction(p1, kInt32Size); \
440 : } \
441 : \
442 : template <class P1> \
443 : void instruction##q(P1 p1) { \
444 : emit_##instruction(p1, kInt64Size); \
445 : } \
446 : \
447 : template <class P1, class P2> \
448 : void instruction##_tagged(P1 p1, P2 p2) { \
449 : STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
450 : /* TODO(ishell): change to kTaggedSize */ \
451 : emit_##instruction(p1, p2, \
452 : COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
453 : } \
454 : \
455 : template <class P1, class P2> \
456 : void instruction##l(P1 p1, P2 p2) { \
457 : emit_##instruction(p1, p2, kInt32Size); \
458 : } \
459 : \
460 : template <class P1, class P2> \
461 : void instruction##q(P1 p1, P2 p2) { \
462 : emit_##instruction(p1, p2, kInt64Size); \
463 : } \
464 : \
465 : template <class P1, class P2, class P3> \
466 : void instruction##l(P1 p1, P2 p2, P3 p3) { \
467 : emit_##instruction(p1, p2, p3, kInt32Size); \
468 : } \
469 : \
470 : template <class P1, class P2, class P3> \
471 : void instruction##q(P1 p1, P2 p2, P3 p3) { \
472 : emit_##instruction(p1, p2, p3, kInt64Size); \
473 : }
474 151804099 : ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
475 : #undef DECLARE_INSTRUCTION
476 :
477 : // Insert the smallest number of nop instructions
478 : // possible to align the pc offset to a multiple
479 : // of m, where m must be a power of 2.
480 : void Align(int m);
481 : // Insert the smallest number of zero bytes possible to align the pc offset
482 : // to a mulitple of m. m must be a power of 2 (>= 2).
483 : void DataAlign(int m);
484 : void Nop(int bytes = 1);
485 : // Aligns code to a boundary that is optimal as a jump target on this platform.
486 : void CodeTargetAlign();
487 :
488 : // Stack
489 : void pushfq();
490 : void popfq();
491 :
492 : void pushq(Immediate value);
493 : // Push a 32-bit integer and guarantee that it is actually pushed as a
494 : // 32-bit value; the normal push will optimize the 8-bit case.
495 : void pushq_imm32(int32_t imm32);
496 : void pushq(Register src);
497 : void pushq(Operand src);
498 :
499 : void popq(Register dst);
500 : void popq(Operand dst);
501 :
502 : void enter(Immediate size);
503 : void leave();
504 :
505 : // Moves
506 : void movb(Register dst, Operand src);
507 : void movb(Register dst, Immediate imm);
508 : void movb(Operand dst, Register src);
509 : void movb(Operand dst, Immediate imm);
510 :
511 : // Move the low 16 bits of a 64-bit register value to or from a 16-bit
512 : // memory location.
513 : void movw(Register dst, Operand src);
514 : void movw(Operand dst, Register src);
515 : void movw(Operand dst, Immediate imm);
516 :
517 : // Move the offset of the label location relative to the current
518 : // position (after the move) to the destination.
519 : void movl(Operand dst, Label* src);
520 :
521 : // Load a heap number into a register.
522 : // The heap number will not be allocated and embedded into the code right
523 : // away. Instead, we emit the load of a dummy object. Later, when calling
524 : // Assembler::GetCode, the heap number will be allocated and the code will be
525 : // patched by replacing the dummy with the actual object. The RelocInfo for
526 : // the embedded object is already recorded correctly when the dummy move is
527 : // emitted.
528 : void movq_heap_number(Register dst, double value);
529 :
530 : void movq_string(Register dst, const StringConstantBase* str);
531 :
532 : // Loads a 64-bit immediate into a register.
533 3002115 : void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); }
534 1229714 : void movq(Register dst, uint64_t value) {
535 1229714 : movq(dst, Immediate64(static_cast<int64_t>(value)));
536 1229722 : }
537 :
538 : void movsxbl(Register dst, Register src);
539 : void movsxbl(Register dst, Operand src);
540 : void movsxbq(Register dst, Register src);
541 : void movsxbq(Register dst, Operand src);
542 : void movsxwl(Register dst, Register src);
543 : void movsxwl(Register dst, Operand src);
544 : void movsxwq(Register dst, Register src);
545 : void movsxwq(Register dst, Operand src);
546 : void movsxlq(Register dst, Register src);
547 : void movsxlq(Register dst, Operand src);
548 :
549 : // Repeated moves.
550 :
551 : void repmovsb();
552 : void repmovsw();
553 : void repmovsl() { emit_repmovs(kInt32Size); }
554 : void repmovsq() { emit_repmovs(kInt64Size); }
555 :
556 : // Load RAX from the memory location given by an immediate 64-bit address.
557 : void load_rax(Address value, RelocInfo::Mode rmode);
558 : void load_rax(ExternalReference ext);
559 :
560 : // Conditional moves.
561 : void cmovq(Condition cc, Register dst, Register src);
562 : void cmovq(Condition cc, Register dst, Operand src);
563 : void cmovl(Condition cc, Register dst, Register src);
564 : void cmovl(Condition cc, Register dst, Operand src);
565 :
566 336 : void cmpb(Register dst, Immediate src) {
567 7117 : immediate_arithmetic_op_8(0x7, dst, src);
568 336 : }
569 :
570 : void cmpb_al(Immediate src);
571 :
572 : void cmpb(Register dst, Register src) {
573 3358 : arithmetic_op_8(0x3A, dst, src);
574 : }
575 :
576 453 : void cmpb(Register dst, Operand src) { arithmetic_op_8(0x3A, dst, src); }
577 :
578 460 : void cmpb(Operand dst, Register src) { arithmetic_op_8(0x38, src, dst); }
579 :
580 168 : void cmpb(Operand dst, Immediate src) {
581 15100 : immediate_arithmetic_op_8(0x7, dst, src);
582 168 : }
583 :
584 : void cmpw(Operand dst, Immediate src) {
585 276081 : immediate_arithmetic_op_16(0x7, dst, src);
586 : }
587 :
588 : void cmpw(Register dst, Immediate src) {
589 154089 : immediate_arithmetic_op_16(0x7, dst, src);
590 : }
591 :
592 60 : void cmpw(Register dst, Operand src) { arithmetic_op_16(0x3B, dst, src); }
593 :
594 : void cmpw(Register dst, Register src) {
595 448 : arithmetic_op_16(0x3B, dst, src);
596 : }
597 :
598 455 : void cmpw(Operand dst, Register src) { arithmetic_op_16(0x39, src, dst); }
599 :
600 0 : void testb(Register reg, Operand op) { testb(op, reg); }
601 :
602 0 : void testw(Register reg, Operand op) { testw(op, reg); }
603 :
604 : void andb(Register dst, Immediate src) {
605 : immediate_arithmetic_op_8(0x4, dst, src);
606 : }
607 :
608 : void decb(Register dst);
609 : void decb(Operand dst);
610 :
611 : // Lock prefix.
612 : void lock();
613 :
614 : void xchgb(Register reg, Operand op);
615 : void xchgw(Register reg, Operand op);
616 :
617 : void cmpxchgb(Operand dst, Register src);
618 : void cmpxchgw(Operand dst, Register src);
619 :
620 : // Sign-extends rax into rdx:rax.
621 : void cqo();
622 : // Sign-extends eax into edx:eax.
623 : void cdq();
624 :
625 : // Multiply eax by src, put the result in edx:eax.
626 : void mull(Register src);
627 : void mull(Operand src);
628 : // Multiply rax by src, put the result in rdx:rax.
629 : void mulq(Register src);
630 :
631 : #define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
632 : void instruction##l(Register dst, Immediate imm8) { \
633 : shift(dst, imm8, subcode, kInt32Size); \
634 : } \
635 : \
636 : void instruction##q(Register dst, Immediate imm8) { \
637 : shift(dst, imm8, subcode, kInt64Size); \
638 : } \
639 : \
640 : void instruction##l(Operand dst, Immediate imm8) { \
641 : shift(dst, imm8, subcode, kInt32Size); \
642 : } \
643 : \
644 : void instruction##q(Operand dst, Immediate imm8) { \
645 : shift(dst, imm8, subcode, kInt64Size); \
646 : } \
647 : \
648 : void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
649 : \
650 : void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
651 : \
652 : void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
653 : \
654 : void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
655 1323097 : SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
656 : #undef DECLARE_SHIFT_INSTRUCTION
657 :
658 : // Shifts dst:src left by cl bits, affecting only dst.
659 : void shld(Register dst, Register src);
660 :
661 : // Shifts src:dst right by cl bits, affecting only dst.
662 : void shrd(Register dst, Register src);
663 :
664 : void store_rax(Address dst, RelocInfo::Mode mode);
665 : void store_rax(ExternalReference ref);
666 :
667 : void subb(Register dst, Immediate src) {
668 2334 : immediate_arithmetic_op_8(0x5, dst, src);
669 : }
670 :
671 : void sub_sp_32(uint32_t imm);
672 :
673 : void testb(Register dst, Register src);
674 : void testb(Register reg, Immediate mask);
675 : void testb(Operand op, Immediate mask);
676 : void testb(Operand op, Register reg);
677 :
678 : void testw(Register dst, Register src);
679 : void testw(Register reg, Immediate mask);
680 : void testw(Operand op, Immediate mask);
681 : void testw(Operand op, Register reg);
682 :
683 : // Bit operations.
684 : void bswapl(Register dst);
685 : void bswapq(Register dst);
686 : void btq(Operand dst, Register src);
687 : void btsq(Operand dst, Register src);
688 : void btsq(Register dst, Immediate imm8);
689 : void btrq(Register dst, Immediate imm8);
690 : void bsrq(Register dst, Register src);
691 : void bsrq(Register dst, Operand src);
692 : void bsrl(Register dst, Register src);
693 : void bsrl(Register dst, Operand src);
694 : void bsfq(Register dst, Register src);
695 : void bsfq(Register dst, Operand src);
696 : void bsfl(Register dst, Register src);
697 : void bsfl(Register dst, Operand src);
698 :
699 : // Miscellaneous
700 : void clc();
701 : void cld();
702 : void cpuid();
703 : void hlt();
704 : void int3();
705 : void nop();
706 : void ret(int imm16);
707 : void ud2();
708 : void setcc(Condition cc, Register reg);
709 :
710 : void pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
711 : void pshufw(XMMRegister dst, Operand src, uint8_t shuffle);
712 : void pblendw(XMMRegister dst, Operand src, uint8_t mask);
713 : void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask);
714 : void palignr(XMMRegister dst, Operand src, uint8_t mask);
715 : void palignr(XMMRegister dst, XMMRegister src, uint8_t mask);
716 :
717 : // Label operations & relative jumps (PPUM Appendix D)
718 : //
719 : // Takes a branch opcode (cc) and a label (L) and generates
720 : // either a backward branch or a forward branch and links it
721 : // to the label fixup chain. Usage:
722 : //
723 : // Label L; // unbound label
724 : // j(cc, &L); // forward branch to unbound label
725 : // bind(&L); // bind label to the current pc
726 : // j(cc, &L); // backward branch to bound label
727 : // bind(&L); // illegal: a label may be bound only once
728 : //
729 : // Note: The same Label can be used for forward and backward branches
730 : // but it may be bound only once.
731 :
732 : void bind(Label* L); // binds an unbound label L to the current code position
733 :
734 : // Calls
735 : // Call near relative 32-bit displacement, relative to next instruction.
736 : void call(Label* L);
737 : void call(Address entry, RelocInfo::Mode rmode);
738 : void near_call(Address entry, RelocInfo::Mode rmode);
739 : void near_jmp(Address entry, RelocInfo::Mode rmode);
740 : void call(Handle<Code> target,
741 : RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
742 :
743 : // Calls directly to the given address using a relative offset.
744 : // Should only ever be used in Code objects for calls within the
745 : // same Code object. Should not be used when generating new code (use labels),
746 : // but only when patching existing code.
747 : void call(Address target);
748 :
749 : // Call near absolute indirect, address in register
750 : void call(Register adr);
751 :
752 : // Jumps
753 : // Jump short or near relative.
754 : // Use a 32-bit signed displacement.
755 : // Unconditional jump to L
756 : void jmp(Label* L, Label::Distance distance = Label::kFar);
757 : void jmp(Handle<Code> target, RelocInfo::Mode rmode);
758 :
759 : // Jump near absolute indirect (r64)
760 : void jmp(Register adr);
761 : void jmp(Operand src);
762 :
763 : // Conditional jumps
764 : void j(Condition cc,
765 : Label* L,
766 : Label::Distance distance = Label::kFar);
767 : void j(Condition cc, Address entry, RelocInfo::Mode rmode);
768 : void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
769 :
770 : // Floating-point operations
771 : void fld(int i);
772 :
773 : void fld1();
774 : void fldz();
775 : void fldpi();
776 : void fldln2();
777 :
778 : void fld_s(Operand adr);
779 : void fld_d(Operand adr);
780 :
781 : void fstp_s(Operand adr);
782 : void fstp_d(Operand adr);
783 : void fstp(int index);
784 :
785 : void fild_s(Operand adr);
786 : void fild_d(Operand adr);
787 :
788 : void fist_s(Operand adr);
789 :
790 : void fistp_s(Operand adr);
791 : void fistp_d(Operand adr);
792 :
793 : void fisttp_s(Operand adr);
794 : void fisttp_d(Operand adr);
795 :
796 : void fabs();
797 : void fchs();
798 :
799 : void fadd(int i);
800 : void fsub(int i);
801 : void fmul(int i);
802 : void fdiv(int i);
803 :
804 : void fisub_s(Operand adr);
805 :
806 : void faddp(int i = 1);
807 : void fsubp(int i = 1);
808 : void fsubrp(int i = 1);
809 : void fmulp(int i = 1);
810 : void fdivp(int i = 1);
811 : void fprem();
812 : void fprem1();
813 :
814 : void fxch(int i = 1);
815 : void fincstp();
816 : void ffree(int i = 0);
817 :
818 : void ftst();
819 : void fucomp(int i);
820 : void fucompp();
821 : void fucomi(int i);
822 : void fucomip();
823 :
824 : void fcompp();
825 : void fnstsw_ax();
826 : void fwait();
827 : void fnclex();
828 :
829 : void fsin();
830 : void fcos();
831 : void fptan();
832 : void fyl2x();
833 : void f2xm1();
834 : void fscale();
835 : void fninit();
836 :
837 : void frndint();
838 :
839 : void sahf();
840 :
841 : // SSE instructions
842 : void addss(XMMRegister dst, XMMRegister src);
843 : void addss(XMMRegister dst, Operand src);
844 : void subss(XMMRegister dst, XMMRegister src);
845 : void subss(XMMRegister dst, Operand src);
846 : void mulss(XMMRegister dst, XMMRegister src);
847 : void mulss(XMMRegister dst, Operand src);
848 : void divss(XMMRegister dst, XMMRegister src);
849 : void divss(XMMRegister dst, Operand src);
850 :
851 : void maxss(XMMRegister dst, XMMRegister src);
852 : void maxss(XMMRegister dst, Operand src);
853 : void minss(XMMRegister dst, XMMRegister src);
854 : void minss(XMMRegister dst, Operand src);
855 :
856 : void sqrtss(XMMRegister dst, XMMRegister src);
857 : void sqrtss(XMMRegister dst, Operand src);
858 :
859 : void ucomiss(XMMRegister dst, XMMRegister src);
860 : void ucomiss(XMMRegister dst, Operand src);
861 : void movaps(XMMRegister dst, XMMRegister src);
862 :
863 : // Don't use this unless it's important to keep the
864 : // top half of the destination register unchanged.
865 : // Use movaps when moving float values and movd for integer
866 : // values in xmm registers.
867 : void movss(XMMRegister dst, XMMRegister src);
868 :
869 : void movss(XMMRegister dst, Operand src);
870 : void movss(Operand dst, XMMRegister src);
871 : void shufps(XMMRegister dst, XMMRegister src, byte imm8);
872 :
873 : void cvttss2si(Register dst, Operand src);
874 : void cvttss2si(Register dst, XMMRegister src);
875 : void cvtlsi2ss(XMMRegister dst, Operand src);
876 : void cvtlsi2ss(XMMRegister dst, Register src);
877 :
878 : void andps(XMMRegister dst, XMMRegister src);
879 : void andps(XMMRegister dst, Operand src);
880 : void orps(XMMRegister dst, XMMRegister src);
881 : void orps(XMMRegister dst, Operand src);
882 : void xorps(XMMRegister dst, XMMRegister src);
883 : void xorps(XMMRegister dst, Operand src);
884 :
885 : void addps(XMMRegister dst, XMMRegister src);
886 : void addps(XMMRegister dst, Operand src);
887 : void subps(XMMRegister dst, XMMRegister src);
888 : void subps(XMMRegister dst, Operand src);
889 : void mulps(XMMRegister dst, XMMRegister src);
890 : void mulps(XMMRegister dst, Operand src);
891 : void divps(XMMRegister dst, XMMRegister src);
892 : void divps(XMMRegister dst, Operand src);
893 :
894 : void movmskps(Register dst, XMMRegister src);
895 :
896 : void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
897 : SIMDPrefix pp, LeadingOpcode m, VexW w);
898 : void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
899 : SIMDPrefix pp, LeadingOpcode m, VexW w);
900 :
901 : // SSE2 instructions
902 : void sse2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape,
903 : byte opcode);
904 : void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
905 : byte opcode);
906 : #define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
907 : void instruction(XMMRegister dst, XMMRegister src) { \
908 : sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
909 : } \
910 : void instruction(XMMRegister dst, Operand src) { \
911 : sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
912 : }
913 :
914 3991 : SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
915 : #undef DECLARE_SSE2_INSTRUCTION
916 :
917 : #define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
918 : void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
919 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
920 : } \
921 : void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
922 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
923 : }
924 :
925 276301 : SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
926 : #undef DECLARE_SSE2_AVX_INSTRUCTION
927 :
928 : // SSE3
929 : void lddqu(XMMRegister dst, Operand src);
930 :
931 : // SSSE3
932 : void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
933 : byte escape2, byte opcode);
934 : void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
935 : byte escape2, byte opcode);
936 :
937 : #define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
938 : opcode) \
939 : void instruction(XMMRegister dst, XMMRegister src) { \
940 : ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
941 : } \
942 : void instruction(XMMRegister dst, Operand src) { \
943 : ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
944 : }
945 :
946 2750 : SSSE3_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
947 : #undef DECLARE_SSSE3_INSTRUCTION
948 :
949 : // SSE4
950 : void sse4_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
951 : byte escape2, byte opcode);
952 : void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
953 : byte escape2, byte opcode);
954 : #define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
955 : opcode) \
956 : void instruction(XMMRegister dst, XMMRegister src) { \
957 : sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
958 : } \
959 : void instruction(XMMRegister dst, Operand src) { \
960 : sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
961 : }
962 :
963 592 : SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
964 : #undef DECLARE_SSE4_INSTRUCTION
965 :
966 : #define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
967 : opcode) \
968 : void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
969 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
970 : } \
971 : void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
972 : vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
973 : }
974 :
975 90 : SSSE3_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
976 150 : SSE4_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
977 : #undef DECLARE_SSE34_AVX_INSTRUCTION
978 :
979 : void movd(XMMRegister dst, Register src);
980 : void movd(XMMRegister dst, Operand src);
981 : void movd(Register dst, XMMRegister src);
982 : void movq(XMMRegister dst, Register src);
983 : void movq(Register dst, XMMRegister src);
984 : void movq(XMMRegister dst, XMMRegister src);
985 :
986 : // Don't use this unless it's important to keep the
987 : // top half of the destination register unchanged.
988 : // Use movapd when moving double values and movq for integer
989 : // values in xmm registers.
990 : void movsd(XMMRegister dst, XMMRegister src);
991 :
992 : void movsd(Operand dst, XMMRegister src);
993 : void movsd(XMMRegister dst, Operand src);
994 :
995 : void movdqa(Operand dst, XMMRegister src);
996 : void movdqa(XMMRegister dst, Operand src);
997 :
998 : void movdqu(Operand dst, XMMRegister src);
999 : void movdqu(XMMRegister dst, Operand src);
1000 :
1001 : void movapd(XMMRegister dst, XMMRegister src);
1002 : void movupd(XMMRegister dst, Operand src);
1003 : void movupd(Operand dst, XMMRegister src);
1004 :
1005 : void psllq(XMMRegister reg, byte imm8);
1006 : void psrlq(XMMRegister reg, byte imm8);
1007 : void psllw(XMMRegister reg, byte imm8);
1008 : void pslld(XMMRegister reg, byte imm8);
1009 : void psrlw(XMMRegister reg, byte imm8);
1010 : void psrld(XMMRegister reg, byte imm8);
1011 : void psraw(XMMRegister reg, byte imm8);
1012 : void psrad(XMMRegister reg, byte imm8);
1013 :
1014 : void cvttsd2si(Register dst, Operand src);
1015 : void cvttsd2si(Register dst, XMMRegister src);
1016 : void cvttss2siq(Register dst, XMMRegister src);
1017 : void cvttss2siq(Register dst, Operand src);
1018 : void cvttsd2siq(Register dst, XMMRegister src);
1019 : void cvttsd2siq(Register dst, Operand src);
1020 : void cvttps2dq(XMMRegister dst, Operand src);
1021 : void cvttps2dq(XMMRegister dst, XMMRegister src);
1022 :
1023 : void cvtlsi2sd(XMMRegister dst, Operand src);
1024 : void cvtlsi2sd(XMMRegister dst, Register src);
1025 :
1026 : void cvtqsi2ss(XMMRegister dst, Operand src);
1027 : void cvtqsi2ss(XMMRegister dst, Register src);
1028 :
1029 : void cvtqsi2sd(XMMRegister dst, Operand src);
1030 : void cvtqsi2sd(XMMRegister dst, Register src);
1031 :
1032 :
1033 : void cvtss2sd(XMMRegister dst, XMMRegister src);
1034 : void cvtss2sd(XMMRegister dst, Operand src);
1035 : void cvtsd2ss(XMMRegister dst, XMMRegister src);
1036 : void cvtsd2ss(XMMRegister dst, Operand src);
1037 :
1038 : void cvtsd2si(Register dst, XMMRegister src);
1039 : void cvtsd2siq(Register dst, XMMRegister src);
1040 :
1041 : void addsd(XMMRegister dst, XMMRegister src);
1042 : void addsd(XMMRegister dst, Operand src);
1043 : void subsd(XMMRegister dst, XMMRegister src);
1044 : void subsd(XMMRegister dst, Operand src);
1045 : void mulsd(XMMRegister dst, XMMRegister src);
1046 : void mulsd(XMMRegister dst, Operand src);
1047 : void divsd(XMMRegister dst, XMMRegister src);
1048 : void divsd(XMMRegister dst, Operand src);
1049 :
1050 : void maxsd(XMMRegister dst, XMMRegister src);
1051 : void maxsd(XMMRegister dst, Operand src);
1052 : void minsd(XMMRegister dst, XMMRegister src);
1053 : void minsd(XMMRegister dst, Operand src);
1054 :
1055 : void andpd(XMMRegister dst, XMMRegister src);
1056 : void andpd(XMMRegister dst, Operand src);
1057 : void orpd(XMMRegister dst, XMMRegister src);
1058 : void orpd(XMMRegister dst, Operand src);
1059 : void xorpd(XMMRegister dst, XMMRegister src);
1060 : void xorpd(XMMRegister dst, Operand src);
1061 : void sqrtsd(XMMRegister dst, XMMRegister src);
1062 : void sqrtsd(XMMRegister dst, Operand src);
1063 :
1064 : void haddps(XMMRegister dst, XMMRegister src);
1065 : void haddps(XMMRegister dst, Operand src);
1066 :
1067 : void ucomisd(XMMRegister dst, XMMRegister src);
1068 : void ucomisd(XMMRegister dst, Operand src);
1069 : void cmpltsd(XMMRegister dst, XMMRegister src);
1070 :
1071 : void movmskpd(Register dst, XMMRegister src);
1072 :
1073 : // SSE 4.1 instructions
1074 : void insertps(XMMRegister dst, XMMRegister src, byte imm8);
1075 : void extractps(Register dst, XMMRegister src, byte imm8);
1076 : void pextrb(Register dst, XMMRegister src, int8_t imm8);
1077 : void pextrb(Operand dst, XMMRegister src, int8_t imm8);
1078 : void pextrw(Register dst, XMMRegister src, int8_t imm8);
1079 : void pextrw(Operand dst, XMMRegister src, int8_t imm8);
1080 : void pextrd(Register dst, XMMRegister src, int8_t imm8);
1081 : void pextrd(Operand dst, XMMRegister src, int8_t imm8);
1082 : void pinsrb(XMMRegister dst, Register src, int8_t imm8);
1083 : void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
1084 : void pinsrw(XMMRegister dst, Register src, int8_t imm8);
1085 : void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
1086 : void pinsrd(XMMRegister dst, Register src, int8_t imm8);
1087 : void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
1088 :
1089 : void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
1090 : void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
1091 :
1092 : void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
1093 : void cmpps(XMMRegister dst, Operand src, int8_t cmp);
1094 : void cmppd(XMMRegister dst, XMMRegister src, int8_t cmp);
1095 : void cmppd(XMMRegister dst, Operand src, int8_t cmp);
1096 :
1097 : #define SSE_CMP_P(instr, imm8) \
1098 : void instr##ps(XMMRegister dst, XMMRegister src) { cmpps(dst, src, imm8); } \
1099 : void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \
1100 : void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
1101 : void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
1102 :
1103 24 : SSE_CMP_P(cmpeq, 0x0)
1104 28 : SSE_CMP_P(cmplt, 0x1)
1105 32 : SSE_CMP_P(cmple, 0x2)
1106 20 : SSE_CMP_P(cmpneq, 0x4)
1107 20 : SSE_CMP_P(cmpnlt, 0x5)
1108 20 : SSE_CMP_P(cmpnle, 0x6)
1109 :
1110 : #undef SSE_CMP_P
1111 :
1112 : void minps(XMMRegister dst, XMMRegister src);
1113 : void minps(XMMRegister dst, Operand src);
1114 : void maxps(XMMRegister dst, XMMRegister src);
1115 : void maxps(XMMRegister dst, Operand src);
1116 : void rcpps(XMMRegister dst, XMMRegister src);
1117 : void rcpps(XMMRegister dst, Operand src);
1118 : void rsqrtps(XMMRegister dst, XMMRegister src);
1119 : void rsqrtps(XMMRegister dst, Operand src);
1120 : void sqrtps(XMMRegister dst, XMMRegister src);
1121 : void sqrtps(XMMRegister dst, Operand src);
1122 : void movups(XMMRegister dst, XMMRegister src);
1123 : void movups(XMMRegister dst, Operand src);
1124 : void movups(Operand dst, XMMRegister src);
1125 : void psrldq(XMMRegister dst, uint8_t shift);
1126 : void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1127 : void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
1128 : void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1129 : void pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
1130 : void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1131 : void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
1132 : void cvtdq2ps(XMMRegister dst, XMMRegister src);
1133 : void cvtdq2ps(XMMRegister dst, Operand src);
1134 :
1135 : // AVX instructions
1136 : void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1137 14 : vfmasd(0x99, dst, src1, src2);
1138 : }
1139 : void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1140 14 : vfmasd(0xa9, dst, src1, src2);
1141 : }
1142 : void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1143 14 : vfmasd(0xb9, dst, src1, src2);
1144 : }
1145 : void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1146 14 : vfmasd(0x99, dst, src1, src2);
1147 : }
1148 : void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1149 14 : vfmasd(0xa9, dst, src1, src2);
1150 : }
1151 : void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1152 14 : vfmasd(0xb9, dst, src1, src2);
1153 : }
1154 : void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1155 9 : vfmasd(0x9b, dst, src1, src2);
1156 : }
1157 : void vfmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1158 9 : vfmasd(0xab, dst, src1, src2);
1159 : }
1160 : void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1161 9 : vfmasd(0xbb, dst, src1, src2);
1162 : }
1163 : void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1164 9 : vfmasd(0x9b, dst, src1, src2);
1165 : }
1166 : void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1167 9 : vfmasd(0xab, dst, src1, src2);
1168 : }
1169 : void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1170 9 : vfmasd(0xbb, dst, src1, src2);
1171 : }
1172 : void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1173 9 : vfmasd(0x9d, dst, src1, src2);
1174 : }
1175 : void vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1176 9 : vfmasd(0xad, dst, src1, src2);
1177 : }
1178 : void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1179 9 : vfmasd(0xbd, dst, src1, src2);
1180 : }
1181 : void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1182 9 : vfmasd(0x9d, dst, src1, src2);
1183 : }
1184 : void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1185 9 : vfmasd(0xad, dst, src1, src2);
1186 : }
1187 : void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1188 9 : vfmasd(0xbd, dst, src1, src2);
1189 : }
1190 : void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1191 9 : vfmasd(0x9f, dst, src1, src2);
1192 : }
1193 : void vfnmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1194 9 : vfmasd(0xaf, dst, src1, src2);
1195 : }
1196 : void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1197 9 : vfmasd(0xbf, dst, src1, src2);
1198 : }
1199 : void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1200 9 : vfmasd(0x9f, dst, src1, src2);
1201 : }
1202 : void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1203 9 : vfmasd(0xaf, dst, src1, src2);
1204 : }
1205 : void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1206 9 : vfmasd(0xbf, dst, src1, src2);
1207 : }
1208 : void vfmasd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1209 : void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1210 :
1211 : void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1212 9 : vfmass(0x99, dst, src1, src2);
1213 : }
1214 : void vfmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1215 9 : vfmass(0xa9, dst, src1, src2);
1216 : }
1217 : void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1218 9 : vfmass(0xb9, dst, src1, src2);
1219 : }
1220 : void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1221 9 : vfmass(0x99, dst, src1, src2);
1222 : }
1223 : void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1224 9 : vfmass(0xa9, dst, src1, src2);
1225 : }
1226 : void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1227 9 : vfmass(0xb9, dst, src1, src2);
1228 : }
1229 : void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1230 9 : vfmass(0x9b, dst, src1, src2);
1231 : }
1232 : void vfmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1233 9 : vfmass(0xab, dst, src1, src2);
1234 : }
1235 : void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1236 9 : vfmass(0xbb, dst, src1, src2);
1237 : }
1238 : void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1239 9 : vfmass(0x9b, dst, src1, src2);
1240 : }
1241 : void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1242 9 : vfmass(0xab, dst, src1, src2);
1243 : }
1244 : void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1245 9 : vfmass(0xbb, dst, src1, src2);
1246 : }
1247 : void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1248 9 : vfmass(0x9d, dst, src1, src2);
1249 : }
1250 : void vfnmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1251 9 : vfmass(0xad, dst, src1, src2);
1252 : }
1253 : void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1254 9 : vfmass(0xbd, dst, src1, src2);
1255 : }
1256 : void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1257 9 : vfmass(0x9d, dst, src1, src2);
1258 : }
1259 : void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1260 9 : vfmass(0xad, dst, src1, src2);
1261 : }
1262 : void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1263 9 : vfmass(0xbd, dst, src1, src2);
1264 : }
1265 : void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1266 9 : vfmass(0x9f, dst, src1, src2);
1267 : }
1268 : void vfnmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1269 9 : vfmass(0xaf, dst, src1, src2);
1270 : }
1271 : void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1272 9 : vfmass(0xbf, dst, src1, src2);
1273 : }
1274 : void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1275 9 : vfmass(0x9f, dst, src1, src2);
1276 : }
1277 : void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1278 9 : vfmass(0xaf, dst, src1, src2);
1279 : }
1280 : void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1281 9 : vfmass(0xbf, dst, src1, src2);
1282 : }
1283 : void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1284 : void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1285 :
1286 : void vmovd(XMMRegister dst, Register src);
1287 : void vmovd(XMMRegister dst, Operand src);
1288 : void vmovd(Register dst, XMMRegister src);
1289 : void vmovq(XMMRegister dst, Register src);
1290 : void vmovq(XMMRegister dst, Operand src);
1291 : void vmovq(Register dst, XMMRegister src);
1292 :
1293 102887 : void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1294 : vsd(0x10, dst, src1, src2);
1295 102889 : }
1296 2838837 : void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
1297 3359408 : void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
1298 :
1299 : #define AVX_SP_3(instr, opcode) \
1300 : AVX_S_3(instr, opcode) \
1301 : AVX_P_3(instr, opcode)
1302 :
1303 : #define AVX_S_3(instr, opcode) \
1304 : AVX_3(instr##ss, opcode, vss) \
1305 : AVX_3(instr##sd, opcode, vsd)
1306 :
1307 : #define AVX_P_3(instr, opcode) \
1308 : AVX_3(instr##ps, opcode, vps) \
1309 : AVX_3(instr##pd, opcode, vpd)
1310 :
1311 : #define AVX_3(instr, opcode, impl) \
1312 : void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1313 : impl(opcode, dst, src1, src2); \
1314 : } \
1315 : void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
1316 : impl(opcode, dst, src1, src2); \
1317 : }
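// For example, AVX_SP_3(vadd, 0x58) below expands into vaddss/vaddsd/vaddps/
// vaddpd, each forwarding opcode 0x58 to the corresponding vss/vsd/vps/vpd
// helper.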
1318 :
1319 1280 : AVX_SP_3(vsqrt, 0x51)
1320 163646 : AVX_SP_3(vadd, 0x58)
1321 45184 : AVX_SP_3(vsub, 0x5c)
1322 27526 : AVX_SP_3(vmul, 0x59)
1323 26954 : AVX_SP_3(vdiv, 0x5e)
1324 42 : AVX_SP_3(vmin, 0x5d)
1325 42 : AVX_SP_3(vmax, 0x5f)
1326 779 : AVX_P_3(vand, 0x54)
1327 14 : AVX_P_3(vor, 0x56)
1328 528207 : AVX_P_3(vxor, 0x57)
1329 36050 : AVX_3(vcvtsd2ss, 0x5a, vsd)
1330 20 : AVX_3(vhaddps, 0x7c, vsd)
1331 :
1332 : #undef AVX_3
1333 : #undef AVX_S_3
1334 : #undef AVX_P_3
1335 : #undef AVX_SP_3
1336 :
1337 : void vpsrlq(XMMRegister dst, XMMRegister src, byte imm8) {
1338 186136 : vpd(0x73, xmm2, dst, src);
1339 : emit(imm8);
1340 : }
1341 : void vpsllq(XMMRegister dst, XMMRegister src, byte imm8) {
1342 215909 : vpd(0x73, xmm6, dst, src);
1343 : emit(imm8);
1344 : }
1345 : void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1346 10110 : vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1347 : }
1348 : void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1349 11310 : vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1350 : }
1351 : void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
1352 366866 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1353 366866 : vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
1354 : }
1355 : void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1356 3921 : vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
1357 : }
1358 : void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
1359 1122 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1360 1122 : vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
1361 : }
1362 : void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1363 8 : vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
1364 : }
1365 : void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
1366 404 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1367 404 : vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
1368 : }
1369 : void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1370 0 : vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
1371 : }
1372 : void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
1373 19592 : XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1374 19592 : vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
1375 : }
1376 : void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1377 2082 : vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
1378 : }
1379 460 : void vcvttss2si(Register dst, XMMRegister src) {
1380 460 : XMMRegister idst = XMMRegister::from_code(dst.code());
1381 460 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1382 460 : }
1383 0 : void vcvttss2si(Register dst, Operand src) {
1384 0 : XMMRegister idst = XMMRegister::from_code(dst.code());
1385 0 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1386 0 : }
1387 104840 : void vcvttsd2si(Register dst, XMMRegister src) {
1388 104840 : XMMRegister idst = XMMRegister::from_code(dst.code());
1389 104840 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1390 104840 : }
1391 20322 : void vcvttsd2si(Register dst, Operand src) {
1392 20322 : XMMRegister idst = XMMRegister::from_code(dst.code());
1393 20322 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1394 20322 : }
1395 364 : void vcvttss2siq(Register dst, XMMRegister src) {
1396 364 : XMMRegister idst = XMMRegister::from_code(dst.code());
1397 364 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1398 364 : }
1399 0 : void vcvttss2siq(Register dst, Operand src) {
1400 0 : XMMRegister idst = XMMRegister::from_code(dst.code());
1401 0 : vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1402 0 : }
1403 61720 : void vcvttsd2siq(Register dst, XMMRegister src) {
1404 61720 : XMMRegister idst = XMMRegister::from_code(dst.code());
1405 61720 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1406 61723 : }
1407 10 : void vcvttsd2siq(Register dst, Operand src) {
1408 10 : XMMRegister idst = XMMRegister::from_code(dst.code());
1409 10 : vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1410 10 : }
1411 9 : void vcvtsd2si(Register dst, XMMRegister src) {
1412 9 : XMMRegister idst = XMMRegister::from_code(dst.code());
1413 9 : vinstr(0x2d, idst, xmm0, src, kF2, k0F, kW0);
1414 9 : }
1415 : void vucomisd(XMMRegister dst, XMMRegister src) {
1416 238434 : vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
1417 : }
1418 : void vucomisd(XMMRegister dst, Operand src) {
1419 20703 : vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
1420 : }
1421 591 : void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
1422 : RoundingMode mode) {
1423 591 : vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
1424 590 : emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
1425 590 : }
1426 43974 : void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
1427 : RoundingMode mode) {
1428 43974 : vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
1429 43976 : emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
1430 43976 : }
1431 :
1432 : void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1433 235502 : vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
1434 : }
1435 247237 : void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
1436 3236674 : vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
1437 247237 : }
1438 :
1439 : void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1440 96446 : vss(0x10, dst, src1, src2);
1441 : }
1442 25341 : void vmovss(XMMRegister dst, Operand src) { vss(0x10, dst, xmm0, src); }
1443 684169 : void vmovss(Operand dst, XMMRegister src) { vss(0x11, src, xmm0, dst); }
1444 : void vucomiss(XMMRegister dst, XMMRegister src);
1445 : void vucomiss(XMMRegister dst, Operand src);
1446 : void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1447 : void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1448 :
1449 362 : void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
1450 2053 : void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
1451 6039 : void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
1452 5951 : void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
1453 129827 : void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
1454 5 : void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
1455 5 : void vmovupd(Operand dst, XMMRegister src) { vpd(0x11, src, xmm0, dst); }
1456 : void vmovmskps(Register dst, XMMRegister src) {
1457 196 : XMMRegister idst = XMMRegister::from_code(dst.code());
1458 196 : vps(0x50, idst, xmm0, src);
1459 : }
1460 : void vmovmskpd(Register dst, XMMRegister src) {
1461 662 : XMMRegister idst = XMMRegister::from_code(dst.code());
1462 662 : vpd(0x50, idst, xmm0, src);
1463 : }
1464 : void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
1465 35 : vps(0xC2, dst, src1, src2);
1466 : emit(cmp);
1467 : }
1468 : void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
1469 35 : vps(0xC2, dst, src1, src2);
1470 : emit(cmp);
1471 : }
1472 : void vcmppd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
1473 35 : vpd(0xC2, dst, src1, src2);
1474 : emit(cmp);
1475 : }
1476 : void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
1477 35 : vpd(0xC2, dst, src1, src2);
1478 : emit(cmp);
1479 : }
1480 :
1481 : #define AVX_CMP_P(instr, imm8) \
1482 : void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1483 : vcmpps(dst, src1, src2, imm8); \
1484 : } \
1485 : void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
1486 : vcmpps(dst, src1, src2, imm8); \
1487 : } \
1488 : void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1489 : vcmppd(dst, src1, src2, imm8); \
1490 : } \
1491 : void instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
1492 : vcmppd(dst, src1, src2, imm8); \
1493 : }
1494 :
1495 40 : AVX_CMP_P(vcmpeq, 0x0)
1496 40 : AVX_CMP_P(vcmplt, 0x1)
1497 40 : AVX_CMP_P(vcmple, 0x2)
1498 40 : AVX_CMP_P(vcmpneq, 0x4)
1499 40 : AVX_CMP_P(vcmpnlt, 0x5)
1500 40 : AVX_CMP_P(vcmpnle, 0x6)
1501 :
1502 : #undef AVX_CMP_P
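  // For example, AVX_CMP_P(vcmpeq, 0x0) above expands to vcmpeqps/vcmpeqpd
  // wrappers, so vcmpeqps(dst, a, b) emits vcmpps(dst, a, b, 0x0), i.e. a
  // packed-single compare with the EQ predicate in the immediate byte.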
1503 :
1504 : void vlddqu(XMMRegister dst, Operand src) {
1505 5 : vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
1506 : }
1507 5 : void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1508 5 : vinstr(0x71, xmm6, dst, src, k66, k0F, kWIG);
1509 : emit(imm8);
1510 5 : }
1511 5 : void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1512 5 : vinstr(0x71, xmm2, dst, src, k66, k0F, kWIG);
1513 : emit(imm8);
1514 5 : }
1515 5 : void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1516 5 : vinstr(0x71, xmm4, dst, src, k66, k0F, kWIG);
1517 : emit(imm8);
1518 5 : }
1519 49089 : void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1520 49089 : vinstr(0x72, xmm6, dst, src, k66, k0F, kWIG);
1521 : emit(imm8);
1522 49090 : }
1523 37172 : void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1524 37172 : vinstr(0x72, xmm2, dst, src, k66, k0F, kWIG);
1525 : emit(imm8);
1526 37172 : }
1527 5 : void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1528 5 : vinstr(0x72, xmm4, dst, src, k66, k0F, kWIG);
1529 : emit(imm8);
1530 5 : }
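  // In the shift-by-immediate forms above, the first XMM argument passed to
  // vinstr() (xmm6, xmm2 or xmm4) is not a real operand: its register code
  // supplies the /6, /2 or /4 opcode extension of the 0x71/0x72 shift groups.
  // The destination is carried in the vreg (VEX.vvvv) argument and the source
  // in the r/m position.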
1531 5 : void vpextrb(Register dst, XMMRegister src, uint8_t imm8) {
1532 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1533 5 : vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
1534 : emit(imm8);
1535 5 : }
1536 5 : void vpextrb(Operand dst, XMMRegister src, uint8_t imm8) {
1537 5 : vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
1538 : emit(imm8);
1539 5 : }
1540 5 : void vpextrw(Register dst, XMMRegister src, uint8_t imm8) {
1541 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1542 5 : vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
1543 : emit(imm8);
1544 5 : }
1545 5 : void vpextrw(Operand dst, XMMRegister src, uint8_t imm8) {
1546 5 : vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
1547 : emit(imm8);
1548 5 : }
1549 5 : void vpextrd(Register dst, XMMRegister src, uint8_t imm8) {
1550 5 : XMMRegister idst = XMMRegister::from_code(dst.code());
1551 5 : vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
1552 : emit(imm8);
1553 5 : }
1554 5 : void vpextrd(Operand dst, XMMRegister src, uint8_t imm8) {
1555 5 : vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
1556 : emit(imm8);
1557 5 : }
1558 5 : void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1559 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1560 5 : vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
1561 : emit(imm8);
1562 5 : }
1563 5 : void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1564 5 : vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
1565 : emit(imm8);
1566 5 : }
1567 5 : void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1568 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1569 5 : vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
1570 : emit(imm8);
1571 5 : }
1572 5 : void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1573 5 : vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
1574 : emit(imm8);
1575 5 : }
1576 5 : void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1577 5 : XMMRegister isrc = XMMRegister::from_code(src2.code());
1578 5 : vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
1579 : emit(imm8);
1580 5 : }
1581 5 : void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1582 5 : vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
1583 : emit(imm8);
1584 5 : }
1585 5 : void vpshufd(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1586 5 : vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
1587 : emit(imm8);
1588 5 : }
1589 :
1590 : void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1591 : void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1592 : void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1593 : void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1594 :
1595              :   // BMI instructions
1596 : void andnq(Register dst, Register src1, Register src2) {
1597 9 : bmi1q(0xf2, dst, src1, src2);
1598 : }
1599 : void andnq(Register dst, Register src1, Operand src2) {
1600 9 : bmi1q(0xf2, dst, src1, src2);
1601 : }
1602 : void andnl(Register dst, Register src1, Register src2) {
1603 9 : bmi1l(0xf2, dst, src1, src2);
1604 : }
1605 : void andnl(Register dst, Register src1, Operand src2) {
1606 9 : bmi1l(0xf2, dst, src1, src2);
1607 : }
1608 : void bextrq(Register dst, Register src1, Register src2) {
1609 9 : bmi1q(0xf7, dst, src2, src1);
1610 : }
1611 : void bextrq(Register dst, Operand src1, Register src2) {
1612 9 : bmi1q(0xf7, dst, src2, src1);
1613 : }
1614 : void bextrl(Register dst, Register src1, Register src2) {
1615 9 : bmi1l(0xf7, dst, src2, src1);
1616 : }
1617 : void bextrl(Register dst, Operand src1, Register src2) {
1618 9 : bmi1l(0xf7, dst, src2, src1);
1619 : }
1620 9 : void blsiq(Register dst, Register src) { bmi1q(0xf3, rbx, dst, src); }
1621 9 : void blsiq(Register dst, Operand src) { bmi1q(0xf3, rbx, dst, src); }
1622 9 : void blsil(Register dst, Register src) { bmi1l(0xf3, rbx, dst, src); }
1623 9 : void blsil(Register dst, Operand src) { bmi1l(0xf3, rbx, dst, src); }
1624 9 : void blsmskq(Register dst, Register src) { bmi1q(0xf3, rdx, dst, src); }
1625 9 : void blsmskq(Register dst, Operand src) { bmi1q(0xf3, rdx, dst, src); }
1626 9 : void blsmskl(Register dst, Register src) { bmi1l(0xf3, rdx, dst, src); }
1627 9 : void blsmskl(Register dst, Operand src) { bmi1l(0xf3, rdx, dst, src); }
1628 9 : void blsrq(Register dst, Register src) { bmi1q(0xf3, rcx, dst, src); }
1629 9 : void blsrq(Register dst, Operand src) { bmi1q(0xf3, rcx, dst, src); }
1630 9 : void blsrl(Register dst, Register src) { bmi1l(0xf3, rcx, dst, src); }
1631 9 : void blsrl(Register dst, Operand src) { bmi1l(0xf3, rcx, dst, src); }
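  // blsi/blsmsk/blsr likewise pass a fixed register (rbx, rdx or rcx) whose
  // code (3, 2 or 1) provides the /3, /2 or /1 opcode extension of the BMI1
  // 0xf3 group; dst travels in the VEX.vvvv field and src is the r/m operand.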
1632 : void tzcntq(Register dst, Register src);
1633 : void tzcntq(Register dst, Operand src);
1634 : void tzcntl(Register dst, Register src);
1635 : void tzcntl(Register dst, Operand src);
1636 :
1637 : void lzcntq(Register dst, Register src);
1638 : void lzcntq(Register dst, Operand src);
1639 : void lzcntl(Register dst, Register src);
1640 : void lzcntl(Register dst, Operand src);
1641 :
1642 : void popcntq(Register dst, Register src);
1643 : void popcntq(Register dst, Operand src);
1644 : void popcntl(Register dst, Register src);
1645 : void popcntl(Register dst, Operand src);
1646 :
1647 : void bzhiq(Register dst, Register src1, Register src2) {
1648 9 : bmi2q(kNone, 0xf5, dst, src2, src1);
1649 : }
1650 : void bzhiq(Register dst, Operand src1, Register src2) {
1651 9 : bmi2q(kNone, 0xf5, dst, src2, src1);
1652 : }
1653 : void bzhil(Register dst, Register src1, Register src2) {
1654 9 : bmi2l(kNone, 0xf5, dst, src2, src1);
1655 : }
1656 : void bzhil(Register dst, Operand src1, Register src2) {
1657 9 : bmi2l(kNone, 0xf5, dst, src2, src1);
1658 : }
1659 : void mulxq(Register dst1, Register dst2, Register src) {
1660 9 : bmi2q(kF2, 0xf6, dst1, dst2, src);
1661 : }
1662 : void mulxq(Register dst1, Register dst2, Operand src) {
1663 9 : bmi2q(kF2, 0xf6, dst1, dst2, src);
1664 : }
1665 : void mulxl(Register dst1, Register dst2, Register src) {
1666 9 : bmi2l(kF2, 0xf6, dst1, dst2, src);
1667 : }
1668 : void mulxl(Register dst1, Register dst2, Operand src) {
1669 9 : bmi2l(kF2, 0xf6, dst1, dst2, src);
1670 : }
1671 : void pdepq(Register dst, Register src1, Register src2) {
1672 9 : bmi2q(kF2, 0xf5, dst, src1, src2);
1673 : }
1674 : void pdepq(Register dst, Register src1, Operand src2) {
1675 9 : bmi2q(kF2, 0xf5, dst, src1, src2);
1676 : }
1677 : void pdepl(Register dst, Register src1, Register src2) {
1678 9 : bmi2l(kF2, 0xf5, dst, src1, src2);
1679 : }
1680 : void pdepl(Register dst, Register src1, Operand src2) {
1681 9 : bmi2l(kF2, 0xf5, dst, src1, src2);
1682 : }
1683 : void pextq(Register dst, Register src1, Register src2) {
1684 9 : bmi2q(kF3, 0xf5, dst, src1, src2);
1685 : }
1686 : void pextq(Register dst, Register src1, Operand src2) {
1687 9 : bmi2q(kF3, 0xf5, dst, src1, src2);
1688 : }
1689 : void pextl(Register dst, Register src1, Register src2) {
1690 9 : bmi2l(kF3, 0xf5, dst, src1, src2);
1691 : }
1692 : void pextl(Register dst, Register src1, Operand src2) {
1693 9 : bmi2l(kF3, 0xf5, dst, src1, src2);
1694 : }
1695 : void sarxq(Register dst, Register src1, Register src2) {
1696 9 : bmi2q(kF3, 0xf7, dst, src2, src1);
1697 : }
1698 : void sarxq(Register dst, Operand src1, Register src2) {
1699 9 : bmi2q(kF3, 0xf7, dst, src2, src1);
1700 : }
1701 : void sarxl(Register dst, Register src1, Register src2) {
1702 9 : bmi2l(kF3, 0xf7, dst, src2, src1);
1703 : }
1704 : void sarxl(Register dst, Operand src1, Register src2) {
1705 9 : bmi2l(kF3, 0xf7, dst, src2, src1);
1706 : }
1707 : void shlxq(Register dst, Register src1, Register src2) {
1708 9 : bmi2q(k66, 0xf7, dst, src2, src1);
1709 : }
1710 : void shlxq(Register dst, Operand src1, Register src2) {
1711 9 : bmi2q(k66, 0xf7, dst, src2, src1);
1712 : }
1713 : void shlxl(Register dst, Register src1, Register src2) {
1714 9 : bmi2l(k66, 0xf7, dst, src2, src1);
1715 : }
1716 : void shlxl(Register dst, Operand src1, Register src2) {
1717 9 : bmi2l(k66, 0xf7, dst, src2, src1);
1718 : }
1719 : void shrxq(Register dst, Register src1, Register src2) {
1720 9 : bmi2q(kF2, 0xf7, dst, src2, src1);
1721 : }
1722 : void shrxq(Register dst, Operand src1, Register src2) {
1723 9 : bmi2q(kF2, 0xf7, dst, src2, src1);
1724 : }
1725 : void shrxl(Register dst, Register src1, Register src2) {
1726 9 : bmi2l(kF2, 0xf7, dst, src2, src1);
1727 : }
1728 : void shrxl(Register dst, Operand src1, Register src2) {
1729 9 : bmi2l(kF2, 0xf7, dst, src2, src1);
1730 : }
1731 : void rorxq(Register dst, Register src, byte imm8);
1732 : void rorxq(Register dst, Operand src, byte imm8);
1733 : void rorxl(Register dst, Register src, byte imm8);
1734 : void rorxl(Register dst, Operand src, byte imm8);
1735 :
1736 : void lfence();
1737 : void pause();
1738 :
1739 : // Check the code size generated from label to here.
1740              :   // Returns the code size generated from label to here.
1741 : return pc_offset() - label->pos();
1742 : }
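  // Usage sketch, assuming a label bound at the start of a sequence with the
  // Assembler's bind() from earlier in this class:
  //   Label start;
  //   bind(&start);
  //   ... emit instructions ...
  //   int bytes = SizeOfCodeGeneratedSince(&start);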
1743 :
1744 : // Record a deoptimization reason that can be used by a log or cpu profiler.
1745 : // Use --trace-deopt to enable.
1746 : void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
1747 : int id);
1748 :
1749 :
1750 : // Writes a single word of data in the code stream.
1751 : // Used for inline tables, e.g., jump-tables.
1752 : void db(uint8_t data);
1753 : void dd(uint32_t data);
1754 : void dq(uint64_t data);
1755 : void dp(uintptr_t data) { dq(data); }
1756 : void dq(Label* label);
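  // Sketch: dq(0x0123456789ABCDEF) writes those eight bytes (least-significant
  // byte first on x64) directly into the instruction stream, e.g. as a
  // jump-table entry.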
1757 :
1758 : // Patch entries for partial constant pool.
1759 : void PatchConstPool();
1760 :
1761              :   // Check whether the partial constant pool should be used for this rmode.
1762 : static bool UseConstPoolFor(RelocInfo::Mode rmode);
1763 :
1764              :   // Check if there are fewer than kGap bytes available in the buffer.
1765 : // If this is the case, we need to grow the buffer before emitting
1766 : // an instruction or relocation information.
1767 : inline bool buffer_overflow() const {
1768 326954057 : return pc_ >= reloc_info_writer.pos() - kGap;
1769 : }
1770 :
1771 : // Get the number of bytes available in the buffer.
1772 : inline int available_space() const {
1773 : return static_cast<int>(reloc_info_writer.pos() - pc_);
1774 : }
1775 :
1776 : static bool IsNop(Address addr);
1777 :
1778 : // Avoid overflows for displacements etc.
1779 : static constexpr int kMaximalBufferSize = 512 * MB;
1780 :
1781 : byte byte_at(int pos) { return buffer_start_[pos]; }
1782 1891010 : void set_byte_at(int pos, byte value) { buffer_start_[pos] = value; }
1783 :
1784 : protected:
1785 : // Call near indirect
1786 : void call(Operand operand);
1787 :
1788 : private:
1789 : Address addr_at(int pos) {
1790 63173395 : return reinterpret_cast<Address>(buffer_start_ + pos);
1791 : }
1792 39040924 : uint32_t long_at(int pos) {
1793 : return ReadUnalignedValue<uint32_t>(addr_at(pos));
1794 : }
1795 18278467 : void long_at_put(int pos, uint32_t x) {
1796 : WriteUnalignedValue(addr_at(pos), x);
1797 : }
1798 :
1799 : // code emission
1800 : void GrowBuffer();
1801 :
1802 504519149 : void emit(byte x) { *pc_++ = x; }
1803 : inline void emitl(uint32_t x);
1804 : inline void emitq(uint64_t x);
1805 : inline void emitw(uint16_t x);
1806 : inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
1807 : inline void emit(Immediate x);
1808 : inline void emit(Immediate64 x);
1809 :
1810 : // Emits a REX prefix that encodes a 64-bit operand size and
1811 : // the top bit of both register codes.
1812 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1813 : // REX.W is set.
1814 : inline void emit_rex_64(XMMRegister reg, Register rm_reg);
1815 : inline void emit_rex_64(Register reg, XMMRegister rm_reg);
1816 : inline void emit_rex_64(Register reg, Register rm_reg);
1817 : inline void emit_rex_64(XMMRegister reg, XMMRegister rm_reg);
1818 :
1819 : // Emits a REX prefix that encodes a 64-bit operand size and
1820 : // the top bit of the destination, index, and base register codes.
1821 : // The high bit of reg is used for REX.R, the high bit of op's base
1822 : // register is used for REX.B, and the high bit of op's index register
1823 : // is used for REX.X. REX.W is set.
1824 : inline void emit_rex_64(Register reg, Operand op);
1825 : inline void emit_rex_64(XMMRegister reg, Operand op);
1826 :
1827 : // Emits a REX prefix that encodes a 64-bit operand size and
1828 : // the top bit of the register code.
1829 : // The high bit of register is used for REX.B.
1830 : // REX.W is set and REX.R and REX.X are clear.
1831 : inline void emit_rex_64(Register rm_reg);
1832 :
1833 : // Emits a REX prefix that encodes a 64-bit operand size and
1834 : // the top bit of the index and base register codes.
1835 : // The high bit of op's base register is used for REX.B, and the high
1836 : // bit of op's index register is used for REX.X.
1837 : // REX.W is set and REX.R clear.
1838 : inline void emit_rex_64(Operand op);
1839 :
1840 : // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
1841 : void emit_rex_64() { emit(0x48); }
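  // REX layout: 0100WRXB. 0x48 == 0b01001000, i.e. only REX.W is set, which
  // selects a 64-bit operand size without extending any register numbers.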
1842 :
1843 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1844 : // REX.W is clear.
1845 : inline void emit_rex_32(Register reg, Register rm_reg);
1846 :
1847 : // The high bit of reg is used for REX.R, the high bit of op's base
1848 : // register is used for REX.B, and the high bit of op's index register
1849 : // is used for REX.X. REX.W is cleared.
1850 : inline void emit_rex_32(Register reg, Operand op);
1851 :
1852 : // High bit of rm_reg goes to REX.B.
1853 : // REX.W, REX.R and REX.X are clear.
1854 : inline void emit_rex_32(Register rm_reg);
1855 :
1856 : // High bit of base goes to REX.B and high bit of index to REX.X.
1857 : // REX.W and REX.R are clear.
1858 : inline void emit_rex_32(Operand op);
1859 :
1860 : // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1861 : // REX.W is cleared. If no REX bits are set, no byte is emitted.
1862 : inline void emit_optional_rex_32(Register reg, Register rm_reg);
1863 :
1864 : // The high bit of reg is used for REX.R, the high bit of op's base
1865 : // register is used for REX.B, and the high bit of op's index register
1866 : // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
1867 : // is emitted.
1868 : inline void emit_optional_rex_32(Register reg, Operand op);
1869 :
1870 : // As for emit_optional_rex_32(Register, Register), except that
1871 : // the registers are XMM registers.
1872 : inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
1873 :
1874 : // As for emit_optional_rex_32(Register, Register), except that
1875 : // one of the registers is an XMM registers.
1876              :   // one of the registers is an XMM register.
1877 :
1878 : // As for emit_optional_rex_32(Register, Register), except that
1879 : // one of the registers is an XMM registers.
1880              :   // one of the registers is an XMM register.
1881 :
1882 : // As for emit_optional_rex_32(Register, Operand), except that
1883 : // the register is an XMM register.
1884 : inline void emit_optional_rex_32(XMMRegister reg, Operand op);
1885 :
1886 : // Optionally do as emit_rex_32(Register) if the register number has
1887 : // the high bit set.
1888 : inline void emit_optional_rex_32(Register rm_reg);
1889 : inline void emit_optional_rex_32(XMMRegister rm_reg);
1890 :
1891 : // Optionally do as emit_rex_32(Operand) if the operand register
1892 : // numbers have a high bit set.
1893 : inline void emit_optional_rex_32(Operand op);
1894 :
1895 : void emit_rex(int size) {
1896 0 : if (size == kInt64Size) {
1897 : emit_rex_64();
1898 : } else {
1899 : DCHECK_EQ(size, kInt32Size);
1900 : }
1901 : }
1902 :
1903 : template<class P1>
1904 : void emit_rex(P1 p1, int size) {
1905 66572386 : if (size == kInt64Size) {
1906 : emit_rex_64(p1);
1907 : } else {
1908 : DCHECK_EQ(size, kInt32Size);
1909 : emit_optional_rex_32(p1);
1910 : }
1911 : }
1912 :
1913 : template<class P1, class P2>
1914 69237453 : void emit_rex(P1 p1, P2 p2, int size) {
1915 69237453 : if (size == kInt64Size) {
1916 : emit_rex_64(p1, p2);
1917 : } else {
1918 : DCHECK_EQ(size, kInt32Size);
1919 : emit_optional_rex_32(p1, p2);
1920 : }
1921 69237453 : }
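  // For example, emit_rex(rax, rcx, kInt64Size) emits 0x48 (REX.W), while
  // emit_rex(r8, rcx, kInt64Size) emits 0x4C (REX.W plus REX.R for r8). With
  // kInt32Size a REX byte is emitted only if an extended register requires it.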
1922 :
1923 : // Emit vex prefix
1924 : void emit_vex2_byte0() { emit(0xc5); }
1925 : inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
1926 : SIMDPrefix pp);
1927 : void emit_vex3_byte0() { emit(0xc4); }
1928 : inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
1929 : inline void emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m);
1930 : inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
1931 : SIMDPrefix pp);
1932 : inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
1933 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1934 : VexW w);
1935 : inline void emit_vex_prefix(Register reg, Register v, Register rm,
1936 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1937 : VexW w);
1938 : inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, Operand rm,
1939 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1940 : VexW w);
1941 : inline void emit_vex_prefix(Register reg, Register v, Operand rm,
1942 : VectorLength l, SIMDPrefix pp, LeadingOpcode m,
1943 : VexW w);
1944 :
1945 : // Emit the ModR/M byte, and optionally the SIB byte and
1946 : // 1- or 4-byte offset for a memory operand. Also encodes
1947 : // the second operand of the operation, a register or operation
1948 : // subcode, into the reg field of the ModR/M byte.
1949 : void emit_operand(Register reg, Operand adr) {
1950 54356492 : emit_operand(reg.low_bits(), adr);
1951 : }
1952 :
1953 : // Emit the ModR/M byte, and optionally the SIB byte and
1954 : // 1- or 4-byte offset for a memory operand. Also used to encode
1955 : // a three-bit opcode extension into the ModR/M byte.
1956 : void emit_operand(int rm, Operand adr);
1957 :
1958 : // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
1959 : void emit_modrm(Register reg, Register rm_reg) {
1960 27666691 : emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
1961 : }
1962 :
1963 : // Emit a ModR/M byte with an operation subcode in the reg field and
1964 : // a register in the rm_reg field.
1965 : void emit_modrm(int code, Register rm_reg) {
1966 : DCHECK(is_uint3(code));
1967 67074318 : emit(0xC0 | code << 3 | rm_reg.low_bits());
1968 : }
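  // Worked example: emit_modrm(rax, rcx) emits 0xC1 (mod = 11 for
  // register-direct, reg = rax = 0 in bits 5..3, rm = rcx = 1 in bits 2..0).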
1969 :
1970 : // Emit the code-object-relative offset of the label's position
1971 : inline void emit_code_relative_offset(Label* label);
1972 :
1973 : // The first argument is the reg field, the second argument is the r/m field.
1974 : void emit_sse_operand(XMMRegister dst, XMMRegister src);
1975 : void emit_sse_operand(XMMRegister reg, Operand adr);
1976 : void emit_sse_operand(Register reg, Operand adr);
1977 : void emit_sse_operand(XMMRegister dst, Register src);
1978 : void emit_sse_operand(Register dst, XMMRegister src);
1979 : void emit_sse_operand(XMMRegister dst);
1980 :
1981              :   // Emit machine code for one of the operations ADD, ADC, SUB, SBB,
1982 : // AND, OR, XOR, or CMP. The encodings of these operations are all
1983 : // similar, differing just in the opcode or in the reg field of the
1984 : // ModR/M byte.
1985 : void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
1986 : void arithmetic_op_8(byte opcode, Register reg, Operand rm_reg);
1987 : void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
1988 : void arithmetic_op_16(byte opcode, Register reg, Operand rm_reg);
1989 : // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
1990 : void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
1991 : void arithmetic_op(byte opcode, Register reg, Operand rm_reg, int size);
1992 : // Operate on a byte in memory or register.
1993 : void immediate_arithmetic_op_8(byte subcode,
1994 : Register dst,
1995 : Immediate src);
1996 : void immediate_arithmetic_op_8(byte subcode, Operand dst, Immediate src);
1997 : // Operate on a word in memory or register.
1998 : void immediate_arithmetic_op_16(byte subcode,
1999 : Register dst,
2000 : Immediate src);
2001 : void immediate_arithmetic_op_16(byte subcode, Operand dst, Immediate src);
2002 : // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
2003 : void immediate_arithmetic_op(byte subcode,
2004 : Register dst,
2005 : Immediate src,
2006 : int size);
2007 : void immediate_arithmetic_op(byte subcode, Operand dst, Immediate src,
2008 : int size);
2009 :
2010 : // Emit machine code for a shift operation.
2011 : void shift(Operand dst, Immediate shift_amount, int subcode, int size);
2012 : void shift(Register dst, Immediate shift_amount, int subcode, int size);
2013              :   // Shift dst by cl % 64 bits (cl % 32 when size is 32).
2014 : void shift(Register dst, int subcode, int size);
2015 : void shift(Operand dst, int subcode, int size);
2016 :
2017 : void emit_farith(int b1, int b2, int i);
2018 :
2019 : // labels
2020 : // void print(Label* L);
2021 : void bind_to(Label* L, int pos);
2022 :
2023 : // record reloc info for current pc_
2024 : void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
2025 :
2026              :   // Arithmetic operations
2027 152028 : void emit_add(Register dst, Register src, int size) {
2028 1077280 : arithmetic_op(0x03, dst, src, size);
2029 152028 : }
2030 :
2031 303963 : void emit_add(Register dst, Immediate src, int size) {
2032 1976149 : immediate_arithmetic_op(0x0, dst, src, size);
2033 303963 : }
2034 :
2035 126 : void emit_add(Register dst, Operand src, int size) {
2036 21954 : arithmetic_op(0x03, dst, src, size);
2037 126 : }
2038 :
2039 : void emit_add(Operand dst, Register src, int size) {
2040 164008 : arithmetic_op(0x1, src, dst, size);
2041 : }
2042 :
2043 336 : void emit_add(Operand dst, Immediate src, int size) {
2044 6449 : immediate_arithmetic_op(0x0, dst, src, size);
2045 336 : }
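  // The emit_add overloads follow the usual x64 pattern: opcode 0x03 is
  // ADD r, r/m (register destination), 0x01 is ADD r/m, r (memory
  // destination), and the immediate forms pass the /0 subcode to
  // immediate_arithmetic_op. The other arithmetic helpers below differ only
  // in opcode and subcode.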
2046 :
2047 : void emit_and(Register dst, Register src, int size) {
2048 836841 : arithmetic_op(0x23, dst, src, size);
2049 : }
2050 :
2051 : void emit_and(Register dst, Operand src, int size) {
2052 4781 : arithmetic_op(0x23, dst, src, size);
2053 : }
2054 :
2055 : void emit_and(Operand dst, Register src, int size) {
2056 : arithmetic_op(0x21, src, dst, size);
2057 : }
2058 :
2059 111293 : void emit_and(Register dst, Immediate src, int size) {
2060 1107720 : immediate_arithmetic_op(0x4, dst, src, size);
2061 111293 : }
2062 :
2063 : void emit_and(Operand dst, Immediate src, int size) {
2064 0 : immediate_arithmetic_op(0x4, dst, src, size);
2065 : }
2066 :
2067 280 : void emit_cmp(Register dst, Register src, int size) {
2068 1936671 : arithmetic_op(0x3B, dst, src, size);
2069 280 : }
2070 :
2071 168 : void emit_cmp(Register dst, Operand src, int size) {
2072 868235 : arithmetic_op(0x3B, dst, src, size);
2073 168 : }
2074 :
2075 : void emit_cmp(Operand dst, Register src, int size) {
2076 1535013 : arithmetic_op(0x39, src, dst, size);
2077 : }
2078 :
2079 382418 : void emit_cmp(Register dst, Immediate src, int size) {
2080 2891319 : immediate_arithmetic_op(0x7, dst, src, size);
2081 382418 : }
2082 :
2083 35323 : void emit_cmp(Operand dst, Immediate src, int size) {
2084 188696 : immediate_arithmetic_op(0x7, dst, src, size);
2085 35323 : }
2086 :
2087              :   // Compare {al,ax,eax,rax} with dst. If equal, set ZF and write src into
2088              :   // dst. Otherwise clear ZF and write dst into {al,ax,eax,rax}. This
2089              :   // operation is only atomic if prefixed by the lock instruction.
2090 : void emit_cmpxchg(Operand dst, Register src, int size);
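  // Semantics sketch for size == 64 (rax is the implicit accumulator):
  //   if (rax == *dst) { ZF = 1; *dst = src; } else { ZF = 0; rax = *dst; }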
2091 :
2092 : void emit_dec(Register dst, int size);
2093 : void emit_dec(Operand dst, int size);
2094 :
2095 : // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
2096 : // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
2097 : // when size is 32.
2098 : void emit_idiv(Register src, int size);
2099 : void emit_div(Register src, int size);
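  // Worked example (size == 64): with rdx:rax == 7 and src containing 2, the
  // emitted idiv leaves the quotient 3 in rax and the remainder 1 in rdx.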
2100 :
2101 : // Signed multiply instructions.
2102 : // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
2103 : void emit_imul(Register src, int size);
2104 : void emit_imul(Operand src, int size);
2105 : void emit_imul(Register dst, Register src, int size);
2106 : void emit_imul(Register dst, Operand src, int size);
2107 : void emit_imul(Register dst, Register src, Immediate imm, int size);
2108 : void emit_imul(Register dst, Operand src, Immediate imm, int size);
2109 :
2110 : void emit_inc(Register dst, int size);
2111 : void emit_inc(Operand dst, int size);
2112 :
2113 : void emit_lea(Register dst, Operand src, int size);
2114 :
2115 : void emit_mov(Register dst, Operand src, int size);
2116 : void emit_mov(Register dst, Register src, int size);
2117 : void emit_mov(Operand dst, Register src, int size);
2118 : void emit_mov(Register dst, Immediate value, int size);
2119 : void emit_mov(Operand dst, Immediate value, int size);
2120 : void emit_mov(Register dst, Immediate64 value, int size);
2121 :
2122 : void emit_movzxb(Register dst, Operand src, int size);
2123 : void emit_movzxb(Register dst, Register src, int size);
2124 : void emit_movzxw(Register dst, Operand src, int size);
2125 : void emit_movzxw(Register dst, Register src, int size);
2126 :
2127 : void emit_neg(Register dst, int size);
2128 : void emit_neg(Operand dst, int size);
2129 :
2130 : void emit_not(Register dst, int size);
2131 : void emit_not(Operand dst, int size);
2132 :
2133 : void emit_or(Register dst, Register src, int size) {
2134 169221 : arithmetic_op(0x0B, dst, src, size);
2135 : }
2136 :
2137 : void emit_or(Register dst, Operand src, int size) {
2138 7361 : arithmetic_op(0x0B, dst, src, size);
2139 : }
2140 :
2141 : void emit_or(Operand dst, Register src, int size) {
2142 4 : arithmetic_op(0x9, src, dst, size);
2143 : }
2144 :
2145 : void emit_or(Register dst, Immediate src, int size) {
2146 31311 : immediate_arithmetic_op(0x1, dst, src, size);
2147 : }
2148 :
2149 : void emit_or(Operand dst, Immediate src, int size) {
2150 0 : immediate_arithmetic_op(0x1, dst, src, size);
2151 : }
2152 :
2153 : void emit_repmovs(int size);
2154 :
2155 : void emit_sbb(Register dst, Register src, int size) {
2156 5 : arithmetic_op(0x1b, dst, src, size);
2157 : }
2158 :
2159 1344 : void emit_sub(Register dst, Register src, int size) {
2160 213483 : arithmetic_op(0x2B, dst, src, size);
2161 1344 : }
2162 :
2163 138080 : void emit_sub(Register dst, Immediate src, int size) {
2164 3638524 : immediate_arithmetic_op(0x5, dst, src, size);
2165 138080 : }
2166 :
2167 : void emit_sub(Register dst, Operand src, int size) {
2168 171635 : arithmetic_op(0x2B, dst, src, size);
2169 : }
2170 :
2171 : void emit_sub(Operand dst, Register src, int size) {
2172 164012 : arithmetic_op(0x29, src, dst, size);
2173 : }
2174 :
2175 112 : void emit_sub(Operand dst, Immediate src, int size) {
2176 117 : immediate_arithmetic_op(0x5, dst, src, size);
2177 112 : }
2178 :
2179 : void emit_test(Register dst, Register src, int size);
2180 : void emit_test(Register reg, Immediate mask, int size);
2181 : void emit_test(Operand op, Register reg, int size);
2182 : void emit_test(Operand op, Immediate mask, int size);
2183 : void emit_test(Register reg, Operand op, int size) {
2184 358 : return emit_test(op, reg, size);
2185 : }
2186 :
2187 : void emit_xchg(Register dst, Register src, int size);
2188 : void emit_xchg(Register dst, Operand src, int size);
2189 :
2190 2751466 : void emit_xor(Register dst, Register src, int size) {
2191 2757746 : if (size == kInt64Size && dst.code() == src.code()) {
2192 : // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
2193 : // there is no need to make this a 64 bit operation.
2194 3870 : arithmetic_op(0x33, dst, src, kInt32Size);
2195 : } else {
2196 2747596 : arithmetic_op(0x33, dst, src, size);
2197 : }
2198 2751464 : }
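  // E.g. emit_xor(rax, rax, kInt64Size) emits the two-byte 32-bit form
  // 0x33 0xC0 (xor eax, eax) instead of the longer REX.W form, since writing
  // eax already clears the upper 32 bits of rax.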
2199 :
2200 : void emit_xor(Register dst, Operand src, int size) {
2201 548 : arithmetic_op(0x33, dst, src, size);
2202 : }
2203 :
2204 : void emit_xor(Register dst, Immediate src, int size) {
2205 22603 : immediate_arithmetic_op(0x6, dst, src, size);
2206 : }
2207 :
2208 : void emit_xor(Operand dst, Immediate src, int size) {
2209 0 : immediate_arithmetic_op(0x6, dst, src, size);
2210 : }
2211 :
2212 : void emit_xor(Operand dst, Register src, int size) {
2213 4 : arithmetic_op(0x31, src, dst, size);
2214 : }
2215 :
2216 : // Most BMI instructions are similar.
2217 : void bmi1q(byte op, Register reg, Register vreg, Register rm);
2218 : void bmi1q(byte op, Register reg, Register vreg, Operand rm);
2219 : void bmi1l(byte op, Register reg, Register vreg, Register rm);
2220 : void bmi1l(byte op, Register reg, Register vreg, Operand rm);
2221 : void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
2222 : void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
2223 : void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
2224 : void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
2225 :
2226              :   // record the position of a jmp/jcc instruction
2227 : void record_farjmp_position(Label* L, int pos);
2228 :
2229 : bool is_optimizable_farjmp(int idx);
2230 :
2231 : void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
2232 :
2233 : int WriteCodeComments();
2234 :
2235 : friend class EnsureSpace;
2236 : friend class RegExpMacroAssemblerX64;
2237 :
2238 : // code generation
2239 : RelocInfoWriter reloc_info_writer;
2240 :
2241 : // Internal reference positions, required for (potential) patching in
2242 : // GrowBuffer(); contains only those internal references whose labels
2243 : // are already bound.
2244 : std::deque<int> internal_reference_positions_;
2245 :
2246              :   // Variables for this instance of the assembler
2247 : int farjmp_num_ = 0;
2248 : std::deque<int> farjmp_positions_;
2249 : std::map<Label*, std::vector<int>> label_farjmp_maps_;
2250 :
2251 : ConstPool constpool_;
2252 :
2253 : friend class ConstPool;
2254 : };
2255 :
2256 :
2257 : // Helper class that ensures that there is enough space for generating
2258 : // instructions and relocation information. The constructor makes
2259 : // sure that there is enough space and (in debug mode) the destructor
2260 : // checks that we did not generate too much.
2261 : class EnsureSpace {
2262 : public:
2263 : explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
2264 326954057 : if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
2265 : #ifdef DEBUG
2266 : space_before_ = assembler_->available_space();
2267 : #endif
2268 : }
2269 :
2270 : #ifdef DEBUG
2271 : ~EnsureSpace() {
2272 : int bytes_generated = space_before_ - assembler_->available_space();
2273 : DCHECK(bytes_generated < assembler_->kGap);
2274 : }
2275 : #endif
2276 :
2277 : private:
2278 : Assembler* assembler_;
2279 : #ifdef DEBUG
2280 : int space_before_;
2281 : #endif
2282 : };
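// Sketch of the typical pattern in the emitting functions of
// assembler-x64.cc, using int3() as an example:
//   void Assembler::int3() {
//     EnsureSpace ensure_space(this);
//     emit(0xCC);
//   }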
2283 :
2284 : } // namespace internal
2285 : } // namespace v8
2286 :
2287 : #endif // V8_X64_ASSEMBLER_X64_H_