LCOV - code coverage report

Current view: top level - src/wasm/baseline - liftoff-assembler.h
Test:         app.info
Date:         2019-01-20

              Hit    Total    Coverage
Lines:         40       47      85.1 %
Functions:      5        7      71.4 %

// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_

#include <iosfwd>
#include <memory>

#include "src/base/bits.h"
#include "src/base/small-vector.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"

namespace v8 {
namespace internal {

// Forward declarations.
namespace compiler {
class CallDescriptor;
}

namespace wasm {

class LiftoffAssembler : public TurboAssembler {
 public:
  // Each slot in our stack frame currently has exactly 8 bytes.
  static constexpr uint32_t kStackSlotSize = 8;

  static constexpr ValueType kWasmIntPtr =
      kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;

  class VarState {
   public:
    enum Location : uint8_t { kStack, kRegister, KIntConst };

    explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
    explicit VarState(ValueType type, LiftoffRegister r)
        : loc_(kRegister), type_(type), reg_(r) {
      DCHECK_EQ(r.reg_class(), reg_class_for(type));
    }
    explicit VarState(ValueType type, int32_t i32_const)
        : loc_(KIntConst), type_(type), i32_const_(i32_const) {
      DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
    }

    bool operator==(const VarState& other) const {
      if (loc_ != other.loc_) return false;
      if (type_ != other.type_) return false;
      switch (loc_) {
        case kStack:
          return true;
        case kRegister:
          return reg_ == other.reg_;
        case KIntConst:
          return i32_const_ == other.i32_const_;
      }
      UNREACHABLE();
    }

    bool is_stack() const { return loc_ == kStack; }
    bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
    bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
    bool is_reg() const { return loc_ == kRegister; }
    bool is_const() const { return loc_ == KIntConst; }

    ValueType type() const { return type_; }

    Location loc() const { return loc_; }

    int32_t i32_const() const {
      DCHECK_EQ(loc_, KIntConst);
      return i32_const_;
    }
    WasmValue constant() const {
      DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
      DCHECK_EQ(loc_, KIntConst);
      return type_ == kWasmI32 ? WasmValue(i32_const_)
                               : WasmValue(int64_t{i32_const_});
    }

    Register gp_reg() const { return reg().gp(); }
    DoubleRegister fp_reg() const { return reg().fp(); }
    LiftoffRegister reg() const {
      DCHECK_EQ(loc_, kRegister);
      return reg_;
    }
    RegClass reg_class() const { return reg().reg_class(); }

    void MakeStack() { loc_ = kStack; }

   private:
    Location loc_;
    // TODO(wasm): This is redundant, the decoder already knows the type of each
    // stack value. Try to collapse.
    ValueType type_;

    union {
      LiftoffRegister reg_;  // used if loc_ == kRegister
      int32_t i32_const_;    // used if loc_ == KIntConst
    };
  };

  ASSERT_TRIVIALLY_COPYABLE(VarState);
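
  // Illustrative sketch (hypothetical values, not part of the original
  // header): the three places a wasm stack value can live, one per
  // {Location}:
  //
  //   VarState on_stack(kWasmI32);          // spilled to its stack slot
  //   VarState in_reg(kWasmI64, some_reg);  // cached in register {some_reg}
  //   VarState as_const(kWasmI32, 42);      // the i32 constant 42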

  struct CacheState {
    // Allow default construction, move construction, and move assignment.
    CacheState() = default;
    CacheState(CacheState&&) V8_NOEXCEPT = default;
    CacheState& operator=(CacheState&&) V8_NOEXCEPT = default;

    base::SmallVector<VarState, 8> stack_state;
    LiftoffRegList used_registers;
    uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
    LiftoffRegList last_spilled_regs;

    bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
      if (kNeedI64RegPair && rc == kGpRegPair) {
        LiftoffRegList available_regs =
            kGpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
        return available_regs.GetNumRegsSet() >= 2;
      }
      DCHECK(rc == kGpReg || rc == kFpReg);
      LiftoffRegList candidates = GetCacheRegList(rc);
      return has_unused_register(candidates, pinned);
    }

    bool has_unused_register(LiftoffRegList candidates,
                             LiftoffRegList pinned = {}) const {
      LiftoffRegList available_regs =
          candidates.MaskOut(used_registers).MaskOut(pinned);
      return !available_regs.is_empty();
    }

    LiftoffRegister unused_register(RegClass rc,
                                    LiftoffRegList pinned = {}) const {
      if (kNeedI64RegPair && rc == kGpRegPair) {
        Register low = pinned.set(unused_register(kGpReg, pinned)).gp();
        Register high = unused_register(kGpReg, pinned).gp();
        return LiftoffRegister::ForPair(low, high);
      }
      DCHECK(rc == kGpReg || rc == kFpReg);
      LiftoffRegList candidates = GetCacheRegList(rc);
      return unused_register(candidates, pinned);
    }

    LiftoffRegister unused_register(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) const {
      LiftoffRegList available_regs =
          candidates.MaskOut(used_registers).MaskOut(pinned);
      return available_regs.GetFirstRegSet();
    }

    void inc_used(LiftoffRegister reg) {
      if (reg.is_pair()) {
        inc_used(reg.low());
        inc_used(reg.high());
        return;
      }
      used_registers.set(reg);
      DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
      ++register_use_count[reg.liftoff_code()];
    }

    // Decrements the use count of {reg}; on the last use, the register is
    // removed from {used_registers}.
    void dec_used(LiftoffRegister reg) {
      DCHECK(is_used(reg));
      if (reg.is_pair()) {
        dec_used(reg.low());
        dec_used(reg.high());
        return;
      }
      int code = reg.liftoff_code();
      DCHECK_LT(0, register_use_count[code]);
      if (--register_use_count[code] == 0) used_registers.clear(reg);
    }

    bool is_used(LiftoffRegister reg) const {
      if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
      bool used = used_registers.has(reg);
      DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
      return used;
    }

    uint32_t get_use_count(LiftoffRegister reg) const {
      if (reg.is_pair()) {
        DCHECK_EQ(register_use_count[reg.low().liftoff_code()],
                  register_use_count[reg.high().liftoff_code()]);
        reg = reg.low();
      }
      DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
      return register_use_count[reg.liftoff_code()];
    }

    void clear_used(LiftoffRegister reg) {
      register_use_count[reg.liftoff_code()] = 0;
      used_registers.clear(reg);
    }

    bool is_free(LiftoffRegister reg) const { return !is_used(reg); }

    void reset_used_registers() {
      used_registers = {};
      memset(register_use_count, 0, sizeof(register_use_count));
    }

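    // Spilling rotates through the candidate set: {last_spilled_regs} tracks
    // what was evicted recently, so the same register is not spilled over and
    // over while the other candidates stay resident.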
    LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) {
      LiftoffRegList unpinned = candidates.MaskOut(pinned);
      DCHECK(!unpinned.is_empty());
      // This method should only be called if none of the candidates is free.
      DCHECK(unpinned.MaskOut(used_registers).is_empty());
      LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
      if (unspilled.is_empty()) {
        unspilled = unpinned;
        last_spilled_regs = {};
      }
      LiftoffRegister reg = unspilled.GetFirstRegSet();
      last_spilled_regs.set(reg);
      return reg;
    }

    // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
    void InitMerge(const CacheState& source, uint32_t num_locals,
                   uint32_t arity, uint32_t stack_depth);

    void Steal(const CacheState& source);

    void Split(const CacheState& source);

    uint32_t stack_height() const {
      return static_cast<uint32_t>(stack_state.size());
    }

   private:
    // Make the copy assignment operator private (to be used from {Split()}).
    CacheState& operator=(const CacheState&) V8_NOEXCEPT = default;
    // Disallow copy construction.
    CacheState(const CacheState&) = delete;
  };

  LiftoffAssembler();
  ~LiftoffAssembler() override;

  LiftoffRegister PopToRegister(LiftoffRegList pinned = {});

  void PushRegister(ValueType type, LiftoffRegister reg) {
    DCHECK_EQ(reg_class_for(type), reg.reg_class());
    cache_state_.inc_used(reg);
    cache_state_.stack_state.emplace_back(type, reg);
  }

  void SpillRegister(LiftoffRegister);

  uint32_t GetNumUses(LiftoffRegister reg) {
    return cache_state_.get_use_count(reg);
  }

  // Get an unused register for class {rc}, reusing one of {try_first} if
  // possible.
  LiftoffRegister GetUnusedRegister(
      RegClass rc, std::initializer_list<LiftoffRegister> try_first,
      LiftoffRegList pinned = {}) {
    for (LiftoffRegister reg : try_first) {
      DCHECK_EQ(reg.reg_class(), rc);
      if (cache_state_.is_free(reg)) return reg;
    }
    return GetUnusedRegister(rc, pinned);
  }

  // Get an unused register for class {rc}, potentially spilling to free one.
  LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
    if (kNeedI64RegPair && rc == kGpRegPair) {
      LiftoffRegList candidates = kGpCacheRegList;
      Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
      Register high = GetUnusedRegister(candidates, pinned).gp();
      return LiftoffRegister::ForPair(low, high);
    }
    DCHECK(rc == kGpReg || rc == kFpReg);
    LiftoffRegList candidates = GetCacheRegList(rc);
    return GetUnusedRegister(candidates, pinned);
  }

  // Get an unused register of {candidates}, potentially spilling to free one.
  LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) {
    if (cache_state_.has_unused_register(candidates, pinned)) {
      return cache_state_.unused_register(candidates, pinned);
    }
    return SpillOneRegister(candidates, pinned);
  }
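
  // Illustrative sketch (hypothetical use, not part of the original header):
  // a typical binary operation pops and pins its inputs, then requests a
  // result register, preferring to reuse one of the inputs:
  //
  //   LiftoffRegList pinned;
  //   LiftoffRegister rhs = pinned.set(PopToRegister(pinned));
  //   LiftoffRegister lhs = pinned.set(PopToRegister(pinned));
  //   LiftoffRegister dst = GetUnusedRegister(kGpReg, {lhs, rhs}, pinned);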

  void MergeFullStackWith(const CacheState& target, const CacheState& source);
  void MergeStackWith(const CacheState& target, uint32_t arity);

  void Spill(uint32_t index);
  void SpillLocals();
  void SpillAllRegisters();

  // Call this method whenever spilling something, so that the number of used
  // spill slots can be tracked and the stack frame will be allocated large
  // enough.
  void RecordUsedSpillSlot(uint32_t index) {
    if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
  }
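  // Example: after {RecordUsedSpillSlot(3)}, {num_used_spill_slots_} is at
  // least 4, so the frame provides slots 0..3 (4 * kStackSlotSize = 32 bytes
  // of spill space).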

  // Load parameters into the right registers / stack slots for the call.
  // Move {*target} into another register if needed and update {*target} to that
  // register, or {no_reg} if target was spilled to the stack.
  void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
                   Register* target = nullptr,
                   Register* target_instance = nullptr);
  // Process return values of the call.
  void FinishCall(FunctionSig*, compiler::CallDescriptor*);

  // Move {src} into {dst}. {src} and {dst} must be different.
  void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);

  // Parallel register move: For a list of tuples <dst, src, type>, move the
  // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
  // that tuple.
  struct ParallelRegisterMoveTuple {
    LiftoffRegister dst;
    LiftoffRegister src;
    ValueType type;
    template <typename Dst, typename Src>
    ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
        : dst(dst), src(src), type(type) {}
  };
  void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
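
  // Illustrative sketch (hypothetical registers, not part of the original
  // header): the whole tuple list is resolved as one parallel move, so a
  // swap-like pattern can be expressed directly, without the caller managing
  // temporaries:
  //
  //   ParallelRegisterMoveTuple moves[] = {{reg_a, reg_b, kWasmI32},
  //                                        {reg_b, reg_a, kWasmI32}};
  //   ParallelRegisterMove(ArrayVector(moves));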

  void MoveToReturnRegisters(FunctionSig*);

#ifdef ENABLE_SLOW_DCHECKS
  // Validate that the register use counts reflect the state of the cache.
  bool ValidateCacheState() const;
#endif

  ////////////////////////////////////
  // Platform-specific part.        //
  ////////////////////////////////////

  // This function emits machine code to prepare the stack frame, before the
  // size of the stack frame is known. It returns an offset in the machine code
  // which can later be patched (via {PatchPrepareStackFrame}) when the size of
  // the frame is known.
  inline int PrepareStackFrame();
  inline void PatchPrepareStackFrame(int offset, uint32_t stack_slots);
  inline void FinishCode();
  inline void AbortCompilation();

  inline void LoadConstant(LiftoffRegister, WasmValue,
                           RelocInfo::Mode rmode = RelocInfo::NONE);
  inline void LoadFromInstance(Register dst, uint32_t offset, int size);
  inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset);
  inline void SpillInstance(Register instance);
  inline void FillInstanceInto(Register dst);
  inline void LoadTaggedPointer(Register dst, Register src_addr,
                                Register offset_reg, uint32_t offset_imm,
                                LiftoffRegList pinned);
  inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                   uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
                   uint32_t* protected_load_pc = nullptr,
                   bool is_load_mem = false);
  inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
                    LiftoffRegister src, StoreType type, LiftoffRegList pinned,
                    uint32_t* protected_store_pc = nullptr,
                    bool is_store_mem = false);
  inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
                                  ValueType);
  inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);

  inline void Move(Register dst, Register src, ValueType);
  inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);

  inline void Spill(uint32_t index, LiftoffRegister, ValueType);
  inline void Spill(uint32_t index, WasmValue);
  inline void Fill(LiftoffRegister, uint32_t index, ValueType);
  // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
  // 4 bytes on the stack holding half of a 64-bit value.
  inline void FillI64Half(Register, uint32_t index, RegPairHalf);

  // i32 binops.
  inline void emit_i32_add(Register dst, Register lhs, Register rhs);
  inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
  inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
  inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
                            Label* trap_div_by_zero,
                            Label* trap_div_unrepresentable);
  inline void emit_i32_divu(Register dst, Register lhs, Register rhs,
                            Label* trap_div_by_zero);
  inline void emit_i32_rems(Register dst, Register lhs, Register rhs,
                            Label* trap_rem_by_zero);
  inline void emit_i32_remu(Register dst, Register lhs, Register rhs,
                            Label* trap_rem_by_zero);
  inline void emit_i32_and(Register dst, Register lhs, Register rhs);
  inline void emit_i32_or(Register dst, Register lhs, Register rhs);
  inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
  inline void emit_i32_shl(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_sar(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_shr(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_shr(Register dst, Register src, int amount);

  // i32 unops.
  inline bool emit_i32_clz(Register dst, Register src);
  inline bool emit_i32_ctz(Register dst, Register src);
  inline bool emit_i32_popcnt(Register dst, Register src);

  // i64 binops.
  inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_div_by_zero,
                            Label* trap_div_unrepresentable);
  inline bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_div_by_zero);
  inline bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_rem_by_zero);
  inline bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_rem_by_zero);
  inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                          LiftoffRegister rhs);
  inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                           int amount);

  inline void emit_i32_to_intptr(Register dst, Register src);

  inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
    if (kSystemPointerSize == 8) {
      emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_add(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
    if (kSystemPointerSize == 8) {
      emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_sub(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
    if (kSystemPointerSize == 8) {
      emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_and(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
    if (kSystemPointerSize == 8) {
      emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
    } else {
      emit_i32_shr(dst, src, amount);
    }
  }

  // f32 binops.
  inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
                                DoubleRegister rhs);

  // f32 unops.
  inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_floor(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src);

  // f64 binops.
  inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
                                DoubleRegister rhs);

  // f64 unops.
  inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src);
  inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_floor(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
  inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);

  inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst,
                                   LiftoffRegister src, Label* trap = nullptr);

  inline void emit_i32_signextend_i8(Register dst, Register src);
  inline void emit_i32_signextend_i16(Register dst, Register src);
  inline void emit_i64_signextend_i8(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_i64_signextend_i16(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src);

  inline void emit_jump(Label*);
  inline void emit_jump(Register);

  inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
                             Register rhs = no_reg);
  // Set {dst} to 1 if condition holds, 0 otherwise.
  inline void emit_i32_eqz(Register dst, Register src);
  inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
                                Register rhs);
  inline void emit_i64_eqz(Register dst, LiftoffRegister src);
  inline void emit_i64_set_cond(Condition condition, Register dst,
                                LiftoffRegister lhs, LiftoffRegister rhs);
  inline void emit_f32_set_cond(Condition condition, Register dst,
                                DoubleRegister lhs, DoubleRegister rhs);
  inline void emit_f64_set_cond(Condition condition, Register dst,
                                DoubleRegister lhs, DoubleRegister rhs);

  inline void StackCheck(Label* ool_code, Register limit_address);

  inline void CallTrapCallbackForTesting();

  inline void AssertUnreachable(AbortReason reason);

  inline void PushRegisters(LiftoffRegList);
  inline void PopRegisters(LiftoffRegList);

  inline void DropStackSlotsAndRet(uint32_t num_stack_slots);

  // Execute a C call. Arguments are pushed to the stack and a pointer to this
  // region is passed to the C function. If {out_argument_type != kWasmStmt},
  // the return value of the C function is stored in {rets[0]}. Further
  // outputs (specified in {sig->returns()}) are read from the buffer and
  // stored in the remaining {rets} registers.
  inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
                    const LiftoffRegister* rets, ValueType out_argument_type,
                    int stack_bytes, ExternalReference ext_ref);
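
  // Illustrative sketch (hypothetical helper, not part of the original
  // header): calling an out-of-line i64 helper through {CallC}. The two i64
  // inputs are written to the stack region, the helper writes its result
  // back, and the result is read into {rets[0]}:
  //
  //   LiftoffRegister args[] = {lhs, rhs};
  //   LiftoffRegister rets[] = {dst};
  //   CallC(sig, args, rets, kWasmI64, /*stack_bytes=*/16, ext_ref);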

  inline void CallNativeWasmCode(Address addr);
  // Indirect call: If {target == no_reg}, then pop the target from the stack.
  inline void CallIndirect(FunctionSig* sig,
                           compiler::CallDescriptor* call_descriptor,
                           Register target);
  inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);

  // Reserve space in the current frame; store the address of that space in
  // {addr}.
  inline void AllocateStackSlot(Register addr, uint32_t size);
  inline void DeallocateStackSlot(uint32_t size);

  ////////////////////////////////////
  // End of platform-specific part. //
  ////////////////////////////////////

  uint32_t num_locals() const { return num_locals_; }
  void set_num_locals(uint32_t num_locals);

  uint32_t GetTotalFrameSlotCount() const {
    return num_locals_ + num_used_spill_slots_;
  }

  ValueType local_type(uint32_t index) {
    DCHECK_GT(num_locals_, index);
    ValueType* locals =
        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
    return locals[index];
  }

  void set_local_type(uint32_t index, ValueType type) {
    ValueType* locals =
        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
    locals[index] = type;
  }

  CacheState* cache_state() { return &cache_state_; }
  const CacheState* cache_state() const { return &cache_state_; }

  bool did_bailout() { return bailout_reason_ != nullptr; }
  const char* bailout_reason() const { return bailout_reason_; }

  void bailout(const char* reason) {
    if (bailout_reason_ != nullptr) return;
    AbortCompilation();
    bailout_reason_ = reason;
  }

 private:
  uint32_t num_locals_ = 0;
  static constexpr uint32_t kInlineLocalTypes = 8;
  union {
    ValueType local_types_[kInlineLocalTypes];
    ValueType* more_local_types_;
  };
  static_assert(sizeof(ValueType) == 1,
                "Reconsider this inlining if ValueType gets bigger");
  CacheState cache_state_;
  uint32_t num_used_spill_slots_ = 0;
  const char* bailout_reason_ = nullptr;

  LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
                                   LiftoffRegList pinned);
};

std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);

// =======================================================================
// Partially platform-independent implementations of the platform-dependent
// part.

#ifdef V8_TARGET_ARCH_32_BIT

namespace liftoff {
template <void (LiftoffAssembler::*op)(Register, Register, Register)>
void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
                                     LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  // If {dst.low_gp()} does not overlap with {lhs.high_gp()} or {rhs.high_gp()},
  // compute the lower half first, then the upper half.
  if (dst.low() != lhs.high() && dst.low() != rhs.high()) {
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    return;
  }
  // If {dst.high_gp()} does not overlap with {lhs.low_gp()} or {rhs.low_gp()},
  // we can compute this the other way around.
  if (dst.high() != lhs.low() && dst.high() != rhs.low()) {
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    return;
  }
  // Otherwise, we need a temporary register.
  Register tmp =
      assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
  (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
  assm->Move(dst.low_gp(), tmp, kWasmI32);
}
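
// Worked example: if {dst.low} aliases {lhs.high} and {dst.high} aliases
// {lhs.low}, neither evaluation order is safe, since writing either half of
// {dst} would clobber an input half that is still needed. That is the case
// handled by the temporary register above: the low result is computed into
// {tmp}, the high result is written directly, and {tmp} is moved into
// {dst.low} last.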
}  // namespace liftoff

void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                                   LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
      this, dst, lhs, rhs);
}

#endif  // V8_TARGET_ARCH_32_BIT

// End of the partially platform-independent implementations of the
// platform-dependent part.
// =======================================================================

class LiftoffStackSlots {
 public:
  explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}

  void Add(const LiftoffAssembler::VarState& src, uint32_t src_index,
           RegPairHalf half) {
    slots_.emplace_back(src, src_index, half);
  }
  void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }

  inline void Construct();

 private:
  struct Slot {
    // Allow move construction.
    Slot(Slot&&) V8_NOEXCEPT = default;
    Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
         RegPairHalf half)
        : src_(src), src_index_(src_index), half_(half) {}
    explicit Slot(const LiftoffAssembler::VarState& src)
        : src_(src), half_(kLowWord) {}

    const LiftoffAssembler::VarState src_;
    uint32_t src_index_ = 0;
    RegPairHalf half_;
  };

  base::SmallVector<Slot, 8> slots_;
  LiftoffAssembler* const asm_;

  DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
};
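
// Illustrative sketch (hypothetical call-lowering loop, not part of the
// original header): stack arguments are collected first and only emitted as
// one batch, which lets each platform choose its cheapest store/push
// sequence:
//
//   LiftoffStackSlots stack_slots(this);
//   for (/* each stack parameter {slot} of the callee, in order */) {
//     stack_slots.Add(slot);
//   }
//   stack_slots.Construct();  // emits the actual stores/pushes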

}  // namespace wasm
}  // namespace internal
}  // namespace v8

// Include platform specific implementation.
#if V8_TARGET_ARCH_IA32
#include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/wasm/baseline/x64/liftoff-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#else
#error Unsupported architecture.
#endif

#endif  // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_

Generated by: LCOV version 1.10