// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_

#include <iosfwd>
#include <memory>

#include "src/base/bits.h"
#include "src/base/small-vector.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"

namespace v8 {
namespace internal {

// Forward declarations.
namespace compiler {
class CallDescriptor;
}

namespace wasm {

class LiftoffAssembler : public TurboAssembler {
 public:
  // Each slot in our stack frame currently has exactly 8 bytes.
  static constexpr uint32_t kStackSlotSize = 8;

  static constexpr ValueType kWasmIntPtr =
      kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;

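  // Describes where a value on the wasm value stack currently lives: spilled
  // to the stack frame, cached in a register, or held as an i32 constant
  // (which is also used to encode small i64 constants).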
  class VarState {
   public:
    enum Location : uint8_t { kStack, kRegister, kIntConst };

    explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
    explicit VarState(ValueType type, LiftoffRegister r)
        : loc_(kRegister), type_(type), reg_(r) {
      DCHECK_EQ(r.reg_class(), reg_class_for(type));
    }
    explicit VarState(ValueType type, int32_t i32_const)
        : loc_(kIntConst), type_(type), i32_const_(i32_const) {
      DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
    }

    bool operator==(const VarState& other) const {
      if (loc_ != other.loc_) return false;
      if (type_ != other.type_) return false;
      switch (loc_) {
        case kStack:
          return true;
        case kRegister:
          return reg_ == other.reg_;
        case kIntConst:
          return i32_const_ == other.i32_const_;
      }
      UNREACHABLE();
    }

    bool is_stack() const { return loc_ == kStack; }
    bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
    bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
    bool is_reg() const { return loc_ == kRegister; }
    bool is_const() const { return loc_ == kIntConst; }

    ValueType type() const { return type_; }

    Location loc() const { return loc_; }

    int32_t i32_const() const {
      DCHECK_EQ(loc_, kIntConst);
      return i32_const_;
    }
    WasmValue constant() const {
      DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
      DCHECK_EQ(loc_, kIntConst);
      return type_ == kWasmI32 ? WasmValue(i32_const_)
                               : WasmValue(int64_t{i32_const_});
    }

    Register gp_reg() const { return reg().gp(); }
    DoubleRegister fp_reg() const { return reg().fp(); }
    LiftoffRegister reg() const {
      DCHECK_EQ(loc_, kRegister);
      return reg_;
    }
    RegClass reg_class() const { return reg().reg_class(); }

    void MakeStack() { loc_ = kStack; }

   private:
    Location loc_;
    // TODO(wasm): This is redundant, the decoder already knows the type of each
    // stack value. Try to collapse.
    ValueType type_;

    union {
      LiftoffRegister reg_;  // used if loc_ == kRegister
      int32_t i32_const_;    // used if loc_ == kIntConst
    };
  };

  ASSERT_TRIVIALLY_COPYABLE(VarState);

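  // The register-allocation state of the function being compiled: the abstract
  // wasm value stack, which cache registers are in use (and how often), and
  // which registers were spilled most recently.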
  struct CacheState {
    // Allow default construction, move construction, and move assignment.
    CacheState() = default;
    CacheState(CacheState&&) V8_NOEXCEPT = default;
    CacheState& operator=(CacheState&&) V8_NOEXCEPT = default;

    base::SmallVector<VarState, 8> stack_state;
    LiftoffRegList used_registers;
    uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
    LiftoffRegList last_spilled_regs;

    bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
      if (kNeedI64RegPair && rc == kGpRegPair) {
        LiftoffRegList available_regs =
            kGpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
        return available_regs.GetNumRegsSet() >= 2;
      }
      DCHECK(rc == kGpReg || rc == kFpReg);
      LiftoffRegList candidates = GetCacheRegList(rc);
      return has_unused_register(candidates, pinned);
    }

    bool has_unused_register(LiftoffRegList candidates,
                             LiftoffRegList pinned = {}) const {
      LiftoffRegList available_regs =
          candidates.MaskOut(used_registers).MaskOut(pinned);
      return !available_regs.is_empty();
    }

    LiftoffRegister unused_register(RegClass rc,
                                    LiftoffRegList pinned = {}) const {
      if (kNeedI64RegPair && rc == kGpRegPair) {
        Register low = pinned.set(unused_register(kGpReg, pinned)).gp();
        Register high = unused_register(kGpReg, pinned).gp();
        return LiftoffRegister::ForPair(low, high);
      }
      DCHECK(rc == kGpReg || rc == kFpReg);
      LiftoffRegList candidates = GetCacheRegList(rc);
      return unused_register(candidates, pinned);
    }

    LiftoffRegister unused_register(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) const {
      LiftoffRegList available_regs =
          candidates.MaskOut(used_registers).MaskOut(pinned);
      return available_regs.GetFirstRegSet();
    }

    void inc_used(LiftoffRegister reg) {
      if (reg.is_pair()) {
        inc_used(reg.low());
        inc_used(reg.high());
        return;
      }
      used_registers.set(reg);
      DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
      ++register_use_count[reg.liftoff_code()];
    }

    // Decrement the use count of {reg}; on the last use, also clear it from
    // {used_registers}.
    void dec_used(LiftoffRegister reg) {
      DCHECK(is_used(reg));
      if (reg.is_pair()) {
        dec_used(reg.low());
        dec_used(reg.high());
        return;
      }
      int code = reg.liftoff_code();
      DCHECK_LT(0, register_use_count[code]);
      if (--register_use_count[code] == 0) used_registers.clear(reg);
    }

    bool is_used(LiftoffRegister reg) const {
      if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
      bool used = used_registers.has(reg);
      DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
      return used;
    }

    uint32_t get_use_count(LiftoffRegister reg) const {
      if (reg.is_pair()) {
        DCHECK_EQ(register_use_count[reg.low().liftoff_code()],
                  register_use_count[reg.high().liftoff_code()]);
        reg = reg.low();
      }
      DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
      return register_use_count[reg.liftoff_code()];
    }

    void clear_used(LiftoffRegister reg) {
      register_use_count[reg.liftoff_code()] = 0;
      used_registers.clear(reg);
    }

    bool is_free(LiftoffRegister reg) const { return !is_used(reg); }

    void reset_used_registers() {
      used_registers = {};
      memset(register_use_count, 0, sizeof(register_use_count));
    }

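    // Pick the register to spill next. All {candidates} are already in use;
    // {last_spilled_regs} implements a simple round-robin so that the same
    // register is not spilled over and over again.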
    LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) {
      LiftoffRegList unpinned = candidates.MaskOut(pinned);
      DCHECK(!unpinned.is_empty());
      // This method should only be called if none of the candidates is free.
      DCHECK(unpinned.MaskOut(used_registers).is_empty());
      LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
      if (unspilled.is_empty()) {
        unspilled = unpinned;
        last_spilled_regs = {};
      }
      LiftoffRegister reg = unspilled.GetFirstRegSet();
      last_spilled_regs.set(reg);
      return reg;
    }

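    // Initialize this state for use as a merge target: it is derived from
    // {source}, covering {num_locals} locals and the {arity} topmost values of
    // a stack of height {stack_depth} (see the implementation for details).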
    // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
    void InitMerge(const CacheState& source, uint32_t num_locals,
                   uint32_t arity, uint32_t stack_depth);

    void Steal(const CacheState& source);

    void Split(const CacheState& source);

    uint32_t stack_height() const {
      return static_cast<uint32_t>(stack_state.size());
    }

   private:
    // Make the copy assignment operator private (to be used from {Split()}).
    CacheState& operator=(const CacheState&) V8_NOEXCEPT = default;
    // Disallow copy construction.
    CacheState(const CacheState&) = delete;
  };

  explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>);
  ~LiftoffAssembler() override;

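  // Pop the topmost value of the wasm value stack into a register, loading or
  // materializing it if it is not in a register already. Registers in {pinned}
  // are not used.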
  LiftoffRegister PopToRegister(LiftoffRegList pinned = {});

  void PushRegister(ValueType type, LiftoffRegister reg) {
    DCHECK_EQ(reg_class_for(type), reg.reg_class());
    cache_state_.inc_used(reg);
    cache_state_.stack_state.emplace_back(type, reg);
  }

  void SpillRegister(LiftoffRegister);

  uint32_t GetNumUses(LiftoffRegister reg) {
    return cache_state_.get_use_count(reg);
  }

  // Get an unused register for class {rc}, reusing one of {try_first} if
  // possible.
  LiftoffRegister GetUnusedRegister(
      RegClass rc, std::initializer_list<LiftoffRegister> try_first,
      LiftoffRegList pinned = {}) {
    for (LiftoffRegister reg : try_first) {
      DCHECK_EQ(reg.reg_class(), rc);
      if (cache_state_.is_free(reg)) return reg;
    }
    return GetUnusedRegister(rc, pinned);
  }

  // Get an unused register for class {rc}, potentially spilling to free one.
  LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
    if (kNeedI64RegPair && rc == kGpRegPair) {
      LiftoffRegList candidates = kGpCacheRegList;
      Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
      Register high = GetUnusedRegister(candidates, pinned).gp();
      return LiftoffRegister::ForPair(low, high);
    }
    DCHECK(rc == kGpReg || rc == kFpReg);
    LiftoffRegList candidates = GetCacheRegList(rc);
    return GetUnusedRegister(candidates, pinned);
  }

  // Get an unused register of {candidates}, potentially spilling to free one.
  LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) {
    if (cache_state_.has_unused_register(candidates, pinned)) {
      return cache_state_.unused_register(candidates, pinned);
    }
    return SpillOneRegister(candidates, pinned);
  }
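  // Typical usage (sketch; {lhs} and {pinned} are hypothetical values at the
  // use site): reuse an input register for the result if it is free, otherwise
  // allocate (or spill) another register of the same class:
  //   LiftoffRegister dst = GetUnusedRegister(kGpReg, {lhs}, pinned);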

  void MergeFullStackWith(const CacheState& target, const CacheState& source);
  void MergeStackWith(const CacheState& target, uint32_t arity);

  void Spill(uint32_t index);
  void SpillLocals();
  void SpillAllRegisters();

  // Call this method whenever something is spilled, so that the number of used
  // spill slots can be tracked and the stack frame can be allocated big enough.
  void RecordUsedSpillSlot(uint32_t index) {
    if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
  }

  // Load parameters into the right registers / stack slots for the call.
  // Move {*target} into another register if needed and update {*target} to that
  // register, or {no_reg} if target was spilled to the stack.
  void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
                   Register* target = nullptr,
                   Register* target_instance = nullptr);
  // Process return values of the call.
  void FinishCall(FunctionSig*, compiler::CallDescriptor*);

  // Move {src} into {dst}. {src} and {dst} must be different.
  void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);

  // Parallel register move: For a list of tuples <dst, src, type>, move the
  // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
  // that tuple.
  struct ParallelRegisterMoveTuple {
    LiftoffRegister dst;
    LiftoffRegister src;
    ValueType type;
    template <typename Dst, typename Src>
    ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
        : dst(dst), src(src), type(type) {}
  };
  void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);

  void MoveToReturnRegisters(FunctionSig*);

#ifdef ENABLE_SLOW_DCHECKS
  // Validate that the register use counts reflect the state of the cache.
  bool ValidateCacheState() const;
#endif

  ////////////////////////////////////
  // Platform-specific part.        //
  ////////////////////////////////////

  // This function emits machine code to prepare the stack frame, before the
  // size of the stack frame is known. It returns an offset in the machine code
  // which can later be patched (via {PatchPrepareStackFrame}) when the size of
  // the frame is known.
  inline int PrepareStackFrame();
  inline void PatchPrepareStackFrame(int offset, uint32_t stack_slots);
  inline void FinishCode();
  inline void AbortCompilation();

  inline void LoadConstant(LiftoffRegister, WasmValue,
                           RelocInfo::Mode rmode = RelocInfo::NONE);
  inline void LoadFromInstance(Register dst, uint32_t offset, int size);
  inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset);
  inline void SpillInstance(Register instance);
  inline void FillInstanceInto(Register dst);
  inline void LoadTaggedPointer(Register dst, Register src_addr,
                                Register offset_reg, uint32_t offset_imm,
                                LiftoffRegList pinned);
  inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                   uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
                   uint32_t* protected_load_pc = nullptr,
                   bool is_load_mem = false);
  inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
                    LiftoffRegister src, StoreType type, LiftoffRegList pinned,
                    uint32_t* protected_store_pc = nullptr,
                    bool is_store_mem = false);
  inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
                                  ValueType);
  inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);

  inline void Move(Register dst, Register src, ValueType);
  inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);

  inline void Spill(uint32_t index, LiftoffRegister, ValueType);
  inline void Spill(uint32_t index, WasmValue);
  inline void Fill(LiftoffRegister, uint32_t index, ValueType);
  // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
  // 4 bytes on the stack holding half of a 64-bit value.
  inline void FillI64Half(Register, uint32_t index, RegPairHalf);

  // i32 binops.
  inline void emit_i32_add(Register dst, Register lhs, Register rhs);
  inline void emit_i32_add(Register dst, Register lhs, int32_t imm);
  inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
  inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
  inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
                            Label* trap_div_by_zero,
                            Label* trap_div_unrepresentable);
  inline void emit_i32_divu(Register dst, Register lhs, Register rhs,
                            Label* trap_div_by_zero);
  inline void emit_i32_rems(Register dst, Register lhs, Register rhs,
                            Label* trap_rem_by_zero);
  inline void emit_i32_remu(Register dst, Register lhs, Register rhs,
                            Label* trap_rem_by_zero);
  inline void emit_i32_and(Register dst, Register lhs, Register rhs);
  inline void emit_i32_or(Register dst, Register lhs, Register rhs);
  inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
  inline void emit_i32_shl(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_sar(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_shr(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_shr(Register dst, Register src, int amount);

  // i32 unops.
  inline bool emit_i32_clz(Register dst, Register src);
  inline bool emit_i32_ctz(Register dst, Register src);
  inline bool emit_i32_popcnt(Register dst, Register src);

  // i64 binops.
  inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                           int32_t imm);
  inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_div_by_zero,
                            Label* trap_div_unrepresentable);
  inline bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_div_by_zero);
  inline bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_rem_by_zero);
  inline bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_rem_by_zero);
  inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                          LiftoffRegister rhs);
  inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                           int amount);

  inline void emit_i32_to_intptr(Register dst, Register src);

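  // Pointer-sized helpers: these dispatch to the i64 variant on 64-bit targets
  // and to the i32 variant on 32-bit targets.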
  inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
    if (kSystemPointerSize == 8) {
      emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_add(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
    if (kSystemPointerSize == 8) {
      emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_sub(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
    if (kSystemPointerSize == 8) {
      emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_and(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
    if (kSystemPointerSize == 8) {
      emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
    } else {
      emit_i32_shr(dst, src, amount);
    }
  }

  inline void emit_ptrsize_add(Register dst, Register lhs, int32_t imm) {
    if (kSystemPointerSize == 8) {
      emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
    } else {
      emit_i32_add(dst, lhs, imm);
    }
  }

  // f32 binops.
  inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
                                DoubleRegister rhs);

  // f32 unops.
  inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_floor(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src);

  // f64 binops.
  inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
                                DoubleRegister rhs);

  // f64 unops.
  inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src);
  inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_floor(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
  inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);

  inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst,
                                   LiftoffRegister src, Label* trap = nullptr);

  inline void emit_i32_signextend_i8(Register dst, Register src);
  inline void emit_i32_signextend_i16(Register dst, Register src);
  inline void emit_i64_signextend_i8(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_i64_signextend_i16(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src);

  inline void emit_jump(Label*);
  inline void emit_jump(Register);

  inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
                             Register rhs = no_reg);
  // Set {dst} to 1 if condition holds, 0 otherwise.
  inline void emit_i32_eqz(Register dst, Register src);
  inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
                                Register rhs);
  inline void emit_i64_eqz(Register dst, LiftoffRegister src);
  inline void emit_i64_set_cond(Condition condition, Register dst,
                                LiftoffRegister lhs, LiftoffRegister rhs);
  inline void emit_f32_set_cond(Condition condition, Register dst,
                                DoubleRegister lhs, DoubleRegister rhs);
  inline void emit_f64_set_cond(Condition condition, Register dst,
                                DoubleRegister lhs, DoubleRegister rhs);

  inline void StackCheck(Label* ool_code, Register limit_address);

  inline void CallTrapCallbackForTesting();

  inline void AssertUnreachable(AbortReason reason);

  inline void PushRegisters(LiftoffRegList);
  inline void PopRegisters(LiftoffRegList);

  inline void DropStackSlotsAndRet(uint32_t num_stack_slots);

  // Execute a C call. Arguments are pushed to the stack and a pointer to this
  // region is passed to the C function. If {out_argument_type != kWasmStmt},
  // this is the return value of the C function, stored in {rets[0]}. Further
  // outputs (specified in {sig->returns()}) are read from the buffer and stored
  // in the remaining {rets} registers.
  inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
                    const LiftoffRegister* rets, ValueType out_argument_type,
                    int stack_bytes, ExternalReference ext_ref);

  inline void CallNativeWasmCode(Address addr);
  // Indirect call: If {target == no_reg}, then pop the target from the stack.
  inline void CallIndirect(FunctionSig* sig,
                           compiler::CallDescriptor* call_descriptor,
                           Register target);
  inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);

  // Reserve space in the current frame; store the address of that space in
  // {addr}.
  inline void AllocateStackSlot(Register addr, uint32_t size);
  inline void DeallocateStackSlot(uint32_t size);

  ////////////////////////////////////
  // End of platform-specific part. //
  ////////////////////////////////////

  uint32_t num_locals() const { return num_locals_; }
  void set_num_locals(uint32_t num_locals);

  uint32_t GetTotalFrameSlotCount() const {
    return num_locals_ + num_used_spill_slots_;
  }

  ValueType local_type(uint32_t index) {
    DCHECK_GT(num_locals_, index);
    ValueType* locals =
        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
    return locals[index];
  }

  void set_local_type(uint32_t index, ValueType type) {
    ValueType* locals =
        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
    locals[index] = type;
  }

  CacheState* cache_state() { return &cache_state_; }
  const CacheState* cache_state() const { return &cache_state_; }

  bool did_bailout() { return bailout_reason_ != nullptr; }
  const char* bailout_reason() const { return bailout_reason_; }

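  // Record a bailout reason and abort compilation of this function; only the
  // first reason is kept.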
  void bailout(const char* reason) {
    if (bailout_reason_ != nullptr) return;
    AbortCompilation();
    bailout_reason_ = reason;
  }

 private:
  uint32_t num_locals_ = 0;
  static constexpr uint32_t kInlineLocalTypes = 8;
  union {
    ValueType local_types_[kInlineLocalTypes];
    ValueType* more_local_types_;
  };
  static_assert(sizeof(ValueType) == 1,
                "Reconsider this inlining if ValueType gets bigger");
  CacheState cache_state_;
  uint32_t num_used_spill_slots_ = 0;
  const char* bailout_reason_ = nullptr;

  LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
                                   LiftoffRegList pinned);
};

std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);

// =======================================================================
// Partially platform-independent implementations of the platform-dependent
// part.

#ifdef V8_TARGET_ARCH_32_BIT

namespace liftoff {
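// Implements an i64 operation whose two halves are independent (such as
// and/or/xor) on 32-bit targets by applying the given i32 {op} to both halves
// of the register pairs, taking care of possible aliasing between {dst},
// {lhs} and {rhs}.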
template <void (LiftoffAssembler::*op)(Register, Register, Register)>
void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
                                     LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  // If {dst.low_gp()} does not overlap with {lhs.high_gp()} or {rhs.high_gp()},
  // we can compute the lower half first, then the upper half.
  if (dst.low() != lhs.high() && dst.low() != rhs.high()) {
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    return;
  }
  // If {dst.high_gp()} does not overlap with {lhs.low_gp()} or {rhs.low_gp()},
  // we can compute this the other way around.
  if (dst.high() != lhs.low() && dst.high() != rhs.low()) {
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    return;
  }
  // Otherwise, we need a temporary register.
  Register tmp =
      assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
  (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
  assm->Move(dst.low_gp(), tmp, kWasmI32);
}
}  // namespace liftoff

void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                                   LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
      this, dst, lhs, rhs);
}

#endif  // V8_TARGET_ARCH_32_BIT

// End of the partially platform-independent implementations of the
// platform-dependent part.
// =======================================================================

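// Helper used when setting up a call: collects the {VarState} slots (or 32-bit
// halves of them, for register pairs) that need to be placed on the physical
// stack; {Construct()} then emits the platform-specific code to push them.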
class LiftoffStackSlots {
 public:
  explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}

  void Add(const LiftoffAssembler::VarState& src, uint32_t src_index,
           RegPairHalf half) {
    slots_.emplace_back(src, src_index, half);
  }
  void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }

  inline void Construct();

 private:
  struct Slot {
    // Allow move construction.
    Slot(Slot&&) V8_NOEXCEPT = default;
    Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
         RegPairHalf half)
        : src_(src), src_index_(src_index), half_(half) {}
    explicit Slot(const LiftoffAssembler::VarState& src)
        : src_(src), half_(kLowWord) {}

    const LiftoffAssembler::VarState src_;
    uint32_t src_index_ = 0;
    RegPairHalf half_;
  };

  base::SmallVector<Slot, 8> slots_;
  LiftoffAssembler* const asm_;

  DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
};

}  // namespace wasm
}  // namespace internal
}  // namespace v8

// Include platform specific implementation.
#if V8_TARGET_ARCH_IA32
#include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/wasm/baseline/x64/liftoff-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#else
#error Unsupported architecture.
#endif

#endif  // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
|