Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/wasm/baseline/liftoff-compiler.h"
6 :
7 : #include "src/assembler-inl.h"
8 : #include "src/base/optional.h"
9 : // TODO(clemensh): Remove dependencies on compiler stuff.
10 : #include "src/compiler/linkage.h"
11 : #include "src/compiler/wasm-compiler.h"
12 : #include "src/counters.h"
13 : #include "src/interface-descriptors.h"
14 : #include "src/macro-assembler-inl.h"
15 : #include "src/objects/smi.h"
16 : #include "src/tracing/trace-event.h"
17 : #include "src/utils.h"
18 : #include "src/wasm/baseline/liftoff-assembler.h"
19 : #include "src/wasm/function-body-decoder-impl.h"
20 : #include "src/wasm/function-compiler.h"
21 : #include "src/wasm/memory-tracing.h"
22 : #include "src/wasm/object-access.h"
23 : #include "src/wasm/wasm-engine.h"
24 : #include "src/wasm/wasm-linkage.h"
25 : #include "src/wasm/wasm-objects.h"
26 : #include "src/wasm/wasm-opcodes.h"
27 :
28 : namespace v8 {
29 : namespace internal {
30 : namespace wasm {
31 :
32 : constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
33 : constexpr auto KIntConst = LiftoffAssembler::VarState::KIntConst;
34 : constexpr auto kStack = LiftoffAssembler::VarState::kStack;
35 :
36 : namespace {
37 :
38 : #define __ asm_.
39 :
40 : #define TRACE(...) \
41 : do { \
42 : if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
43 : } while (false)
44 :
45 : #define WASM_INSTANCE_OBJECT_FIELD_OFFSET(name) \
46 : ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
47 :
48 : template <int expected_size, int actual_size>
49 : struct assert_field_size {
50 : static_assert(expected_size == actual_size,
51 : "field in WasmInstance does not have the expected size");
52 : static constexpr int size = actual_size;
53 : };
54 :
55 : #define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
56 : FIELD_SIZE(WasmInstanceObject::k##name##Offset)
57 :
58 : #define LOAD_INSTANCE_FIELD(dst, name, load_size) \
59 : __ LoadFromInstance(dst, WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
60 : assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \
61 : load_size>::size);
62 :
63 : #define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name) \
64 : static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
65 : "field in WasmInstance does not have the expected size"); \
66 : __ LoadTaggedPointerFromInstance(dst, \
67 : WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
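// Note: these macros are used as, e.g., LOAD_INSTANCE_FIELD(addr, GlobalsStart,
// kSystemPointerSize). Both variants verify at compile time (assert_field_size
// resp. the static_assert against kTaggedSize) that the requested load size
// matches the declared size of the WasmInstanceObject field.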
68 :
69 : #ifdef DEBUG
70 : #define DEBUG_CODE_COMMENT(str) \
71 : do { \
72 : __ RecordComment(str); \
73 : } while (false)
74 : #else
75 : #define DEBUG_CODE_COMMENT(str) ((void)0)
76 : #endif
77 :
78 : constexpr LoadType::LoadTypeValue kPointerLoadType =
79 : kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
80 :
81 : #if V8_TARGET_ARCH_ARM64
82 : // On ARM64, the Assembler keeps track of pointers to Labels to resolve
83 : // branches to distant targets. Moving labels would confuse the Assembler,
84 : // so the label is stored on the heap and owned through a unique_ptr.
85 : class MovableLabel {
86 : public:
87 : MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
88 : MovableLabel() : label_(new Label()) {}
89 :
90 : Label* get() { return label_.get(); }
91 :
92 : private:
93 : std::unique_ptr<Label> label_;
94 : };
95 : #else
96 : // On all other platforms, just store the Label directly.
97 : class MovableLabel {
98 : public:
99 : MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);
100 :
101 : Label* get() { return &label_; }
102 :
103 : private:
104 : Label label_;
105 : };
106 : #endif
107 :
108 : compiler::CallDescriptor* GetLoweredCallDescriptor(
109 : Zone* zone, compiler::CallDescriptor* call_desc) {
110 : return kSystemPointerSize == 4
111 : ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
112 : : call_desc;
113 : }
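// Note: on 32-bit targets (kSystemPointerSize == 4) the call descriptor is
// lowered via GetI32WasmCallDescriptor, so that every i64 value is passed as a
// pair of i32 values; on 64-bit targets the descriptor is used unchanged.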
114 :
115 : constexpr ValueType kSupportedTypesArr[] = {kWasmI32, kWasmI64, kWasmF32,
116 : kWasmF64};
117 : constexpr Vector<const ValueType> kSupportedTypes =
118 : ArrayVector(kSupportedTypesArr);
119 :
120 : class LiftoffCompiler {
121 : public:
122 : // TODO(clemensh): Make this a template parameter.
123 : static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
124 :
125 : using Value = ValueBase;
126 :
127 : struct ElseState {
128 : MovableLabel label;
129 : LiftoffAssembler::CacheState state;
130 : };
131 :
132 2092603 : struct Control : public ControlBase<Value> {
133 : std::unique_ptr<ElseState> else_state;
134 : LiftoffAssembler::CacheState label_state;
135 : MovableLabel label;
136 :
137 346190 : MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
138 :
139 : template <typename... Args>
140 873347 : explicit Control(Args&&... args) V8_NOEXCEPT
141 2620041 : : ControlBase(std::forward<Args>(args)...) {}
142 : };
143 :
144 : using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
145 :
146 : struct OutOfLineCode {
147 : MovableLabel label;
148 : MovableLabel continuation;
149 : WasmCode::RuntimeStubId stub;
150 : WasmCodePosition position;
151 : LiftoffRegList regs_to_save;
152 : uint32_t pc; // for trap handler.
153 :
154 : // Named constructors:
155 : static OutOfLineCode Trap(WasmCode::RuntimeStubId s, WasmCodePosition pos,
156 : uint32_t pc) {
157 : DCHECK_LT(0, pos);
158 458036 : return {{}, {}, s, pos, {}, pc};
159 : }
160 : static OutOfLineCode StackCheck(WasmCodePosition pos, LiftoffRegList regs) {
161 262135 : return {{}, {}, WasmCode::kWasmStackGuard, pos, regs, 0};
162 : }
163 : };
164 :
165 711841 : LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
166 : CompilationEnv* env, Zone* compilation_zone)
167 : : descriptor_(
168 : GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
169 : env_(env),
170 : compilation_zone_(compilation_zone),
171 2847351 : safepoint_table_builder_(compilation_zone_) {}
172 :
173 1423919 : ~LiftoffCompiler() { BindUnboundLabels(nullptr); }
174 :
175 : bool ok() const { return ok_; }
176 :
177 687034 : void GetCode(CodeDesc* desc) { asm_.GetCode(nullptr, desc); }
178 :
179 : OwnedVector<uint8_t> GetSourcePositionTable() {
180 687105 : return source_position_table_builder_.ToSourcePositionTableVector();
181 : }
182 :
183 : OwnedVector<trap_handler::ProtectedInstructionData> GetProtectedInstructions()
184 : const {
185 : return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
186 687364 : protected_instructions_);
187 : }
188 :
189 : uint32_t GetTotalFrameSlotCount() const {
190 687166 : return __ GetTotalFrameSlotCount();
191 : }
192 :
193 : void unsupported(FullDecoder* decoder, const char* reason) {
194 17776 : ok_ = false;
195 : TRACE("unsupported: %s\n", reason);
196 : decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
197 17776 : reason);
198 : BindUnboundLabels(decoder);
199 : }
200 :
201 2085301 : bool DidAssemblerBailout(FullDecoder* decoder) {
202 2085301 : if (decoder->failed() || !__ did_bailout()) return false;
203 : unsupported(decoder, __ bailout_reason());
204 0 : return true;
205 : }
206 :
207 1101239 : bool CheckSupportedType(FullDecoder* decoder,
208 : Vector<const ValueType> supported_types,
209 : ValueType type, const char* context) {
210 : char buffer[128];
211 : // Check supported types.
212 2946292 : for (ValueType supported : supported_types) {
213 2945878 : if (type == supported) return true;
214 : }
215 414 : SNPrintF(ArrayVector(buffer), "%s %s", ValueTypes::TypeName(type), context);
216 : unsupported(decoder, buffer);
217 414 : return false;
218 : }
219 :
220 : int GetSafepointTableOffset() const {
221 687166 : return safepoint_table_builder_.GetCodeOffset();
222 : }
223 :
224 : void BindUnboundLabels(FullDecoder* decoder) {
225 : #ifdef DEBUG
226 : // Bind all labels now, otherwise their destructors will fire a DCHECK error
227 : // if they were referenced before.
228 : uint32_t control_depth = decoder ? decoder->control_depth() : 0;
229 : for (uint32_t i = 0; i < control_depth; ++i) {
230 : Control* c = decoder->control_at(i);
231 : Label* label = c->label.get();
232 : if (!label->is_bound()) __ bind(label);
233 : if (c->else_state) {
234 : Label* else_label = c->else_state->label.get();
235 : if (!else_label->is_bound()) __ bind(else_label);
236 : }
237 : }
238 : for (auto& ool : out_of_line_code_) {
239 : if (!ool.label.get()->is_bound()) __ bind(ool.label.get());
240 : }
241 : #endif
242 : }
243 :
244 711804 : void StartFunction(FullDecoder* decoder) {
245 711804 : int num_locals = decoder->num_locals();
246 711804 : __ set_num_locals(num_locals);
247 1441639 : for (int i = 0; i < num_locals; ++i) {
248 729839 : __ set_local_type(i, decoder->GetLocalType(i));
249 : }
250 711800 : }
251 :
252 : // Returns the number of inputs processed (1 or 2).
253 185959 : uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
254 : const int num_lowered_params = 1 + needs_reg_pair(type);
255 : ValueType lowered_type = needs_reg_pair(type) ? kWasmI32 : type;
256 : RegClass rc = reg_class_for(lowered_type);
257 : // Initialize to anything, will be set in the loop and used afterwards.
258 : LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
259 : LiftoffRegList pinned;
260 371959 : for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
261 : compiler::LinkageLocation param_loc =
262 185974 : descriptor_->GetInputLocation(input_idx + pair_idx);
263 : // Initialize to anything, will be set in both arms of the if.
264 : LiftoffRegister in_reg = kGpCacheRegList.GetFirstRegSet();
265 185987 : if (param_loc.IsRegister()) {
266 : DCHECK(!param_loc.IsAnyRegister());
267 : int reg_code = param_loc.AsRegister();
268 : #if V8_TARGET_ARCH_ARM
269 : // Liftoff assumes a one-to-one mapping between float registers and
270 : // double registers, and so does not distinguish between f32 and f64
271 : // registers. The f32 register code must therefore be halved in order to
272 : // pass the f64 code to Liftoff.
273 : DCHECK_IMPLIES(type == kWasmF32, (reg_code % 2) == 0);
274 : if (type == kWasmF32) {
275 : reg_code /= 2;
276 : }
277 : #endif
278 : RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
279 149266 : : kLiftoffAssemblerFpCacheRegs;
280 149266 : if (cache_regs & (1ULL << reg_code)) {
281 : // This is a cache register, just use it.
282 149266 : in_reg = LiftoffRegister::from_code(rc, reg_code);
283 : } else {
284 : // Move to a cache register (spill one if necessary).
285 : // Note that we cannot create a {LiftoffRegister} for reg_code, since
286 : // {LiftoffRegister} can only store cache regs.
287 0 : in_reg = __ GetUnusedRegister(rc, pinned);
288 0 : if (rc == kGpReg) {
289 0 : __ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type);
290 : } else {
291 : __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code),
292 0 : lowered_type);
293 : }
294 : }
295 36721 : } else if (param_loc.IsCallerFrameSlot()) {
296 36721 : in_reg = __ GetUnusedRegister(rc, pinned);
297 : __ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
298 73442 : lowered_type);
299 : }
300 : reg = pair_idx == 0 ? in_reg
301 186000 : : LiftoffRegister::ForPair(reg.gp(), in_reg.gp());
302 : pinned.set(reg);
303 : }
304 : __ PushRegister(type, reg);
305 186002 : return num_lowered_params;
306 : }
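// Note on ProcessParameter: each (lowered) parameter is either taken directly
// from its cache register, moved into a fresh cache register, or loaded from
// the caller's frame slot, and the result is pushed onto Liftoff's value
// stack. On 32-bit targets an i64 parameter occupies two lowered i32 inputs,
// hence the returned count of 1 or 2.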
307 :
308 713920 : void StackCheck(WasmCodePosition position) {
309 1165705 : if (FLAG_wasm_no_stack_checks || !env_->runtime_exception_support) return;
310 : out_of_line_code_.push_back(
311 524499 : OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
312 : OutOfLineCode& ool = out_of_line_code_.back();
313 524532 : Register limit_address = __ GetUnusedRegister(kGpReg).gp();
314 262168 : LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
315 262244 : __ StackCheck(ool.label.get(), limit_address);
316 262004 : __ bind(ool.continuation.get());
317 : }
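// Note: only the comparison against the stack limit (loaded from the instance)
// is emitted inline; the slow path, which saves the live registers and calls
// the kWasmStackGuard runtime stub, is emitted later as out-of-line code and
// jumps back to the continuation label bound here.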
318 :
319 711696 : void StartFunctionBody(FullDecoder* decoder, Control* block) {
320 2170502 : for (uint32_t i = 0; i < __ num_locals(); ++i) {
321 3425540 : if (!CheckSupportedType(decoder, kSupportedTypes, __ local_type(i),
322 729604 : "param"))
323 252 : return;
324 : }
325 :
326 : // Input 0 is the call target, the instance is at 1.
327 : constexpr int kInstanceParameterIndex = 1;
328 : // Store the instance parameter to a special stack slot.
329 : compiler::LinkageLocation instance_loc =
330 711495 : descriptor_->GetInputLocation(kInstanceParameterIndex);
331 : DCHECK(instance_loc.IsRegister());
332 : DCHECK(!instance_loc.IsAnyRegister());
333 711516 : Register instance_reg = Register::from_code(instance_loc.AsRegister());
334 : DCHECK_EQ(kWasmInstanceRegister, instance_reg);
335 :
336 : // Parameter 0 is the instance parameter.
337 : uint32_t num_params =
338 711516 : static_cast<uint32_t>(decoder->sig_->parameter_count());
339 :
340 711516 : __ EnterFrame(StackFrame::WASM_COMPILED);
341 : __ set_has_frame(true);
342 711413 : pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
343 : // {PrepareStackFrame} is the first platform-specific assembler method.
344 : // If this failed, we can bail out immediately, avoiding runtime overhead
345 : // and potential failures because of other unimplemented methods.
346 : // A platform implementing {PrepareStackFrame} must ensure that we can
347 : // finish compilation without errors even if we hit unimplemented
348 : // LiftoffAssembler methods.
349 711413 : if (DidAssemblerBailout(decoder)) return;
350 :
351 711387 : __ SpillInstance(instance_reg);
352 : // Input 0 is the code target, 1 is the instance. First parameter at 2.
353 : uint32_t input_idx = kInstanceParameterIndex + 1;
354 186008 : for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
355 185983 : input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
356 : }
357 : DCHECK_EQ(input_idx, descriptor_->InputCount());
358 : // Set to a gp register, to mark this uninitialized.
359 : LiftoffRegister zero_double_reg = kGpCacheRegList.GetFirstRegSet();
360 : DCHECK(zero_double_reg.is_gp());
361 1798121 : for (uint32_t param_idx = num_params; param_idx < __ num_locals();
362 : ++param_idx) {
363 : ValueType type = decoder->GetLocalType(param_idx);
364 543331 : switch (type) {
365 : case kWasmI32:
366 46171 : __ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
367 : break;
368 : case kWasmI64:
369 49794 : __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0});
370 : break;
371 : case kWasmF32:
372 : case kWasmF64:
373 447368 : if (zero_double_reg.is_gp()) {
374 : // Note: This might spill one of the registers used to hold
375 : // parameters.
376 : zero_double_reg = __ GetUnusedRegister(kFpReg);
377 : // Zero is represented by the bit pattern 0 for both f32 and f64.
378 864 : __ LoadConstant(zero_double_reg, WasmValue(0.));
379 : }
380 : __ PushRegister(type, zero_double_reg);
381 : break;
382 : default:
383 0 : UNIMPLEMENTED();
384 : }
385 : }
386 :
387 : // The function-prologue stack check is associated with position 0, which
388 : // is never a position of any instruction in the function.
389 711456 : StackCheck(0);
390 :
391 : DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
392 : }
393 :
394 668898 : void GenerateOutOfLineCode(OutOfLineCode& ool) {
395 668898 : __ bind(ool.label.get());
396 668918 : const bool is_stack_check = ool.stub == WasmCode::kWasmStackGuard;
397 : const bool is_mem_out_of_bounds =
398 : ool.stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
399 :
400 668918 : if (is_mem_out_of_bounds && env_->use_trap_handler) {
401 819844 : uint32_t pc = static_cast<uint32_t>(__ pc_offset());
402 : DCHECK_EQ(pc, __ pc_offset());
403 : protected_instructions_.emplace_back(
404 211474 : trap_handler::ProtectedInstructionData{ool.pc, pc});
405 : }
406 :
407 668870 : if (!env_->runtime_exception_support) {
408 : // We cannot test calls to the runtime in cctest/test-run-wasm.
409 : // Therefore we emit a call to C here instead of a call to the runtime.
410 : // In this mode, we never generate stack checks.
411 : DCHECK(!is_stack_check);
412 272030 : __ CallTrapCallbackForTesting();
413 272030 : __ LeaveFrame(StackFrame::WASM_COMPILED);
414 : __ DropStackSlotsAndRet(
415 272030 : static_cast<uint32_t>(descriptor_->StackParameterCount()));
416 669040 : return;
417 : }
418 :
419 396840 : if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
420 :
421 : source_position_table_builder_.AddPosition(
422 793792 : __ pc_offset(), SourcePosition(ool.position), false);
423 397242 : __ CallRuntimeStub(ool.stub);
424 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
425 396872 : Safepoint::kNoLazyDeopt);
426 : DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
427 397072 : if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
428 397072 : if (is_stack_check) {
429 239487 : __ emit_jump(ool.continuation.get());
430 : } else {
431 : __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
432 : }
433 : }
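// Note: without runtime exception support (cctest mode) the out-of-line code
// calls the C trap callback and returns directly; otherwise it calls the
// corresponding runtime stub. Only the stack-check path jumps back to its
// continuation; trap paths end in AssertUnreachable.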
434 :
435 687047 : void FinishFunction(FullDecoder* decoder) {
436 1374321 : if (DidAssemblerBailout(decoder)) return;
437 2043294 : for (OutOfLineCode& ool : out_of_line_code_) {
438 668903 : GenerateOutOfLineCode(ool);
439 : }
440 : __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
441 1374524 : __ GetTotalFrameSlotCount());
442 : __ FinishCode();
443 687439 : safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCount());
444 : // The previous calls may have also generated a bailout.
445 687336 : DidAssemblerBailout(decoder);
446 : }
447 :
448 : void OnFirstError(FullDecoder* decoder) {
449 24483 : ok_ = false;
450 : BindUnboundLabels(decoder);
451 : asm_.AbortCompilation();
452 : }
453 :
454 : void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
455 : TraceCacheState(decoder);
456 : SLOW_DCHECK(__ ValidateCacheState());
457 : DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
458 : }
459 :
460 : void Block(FullDecoder* decoder, Control* block) {}
461 :
462 4905 : void Loop(FullDecoder* decoder, Control* loop) {
463 : // Before entering a loop, spill all locals to the stack, in order to free
464 : // the cache registers, and to avoid unnecessarily reloading stack values
465 : // into registers at branches.
466 : // TODO(clemensh): Come up with a better strategy here, involving
467 : // pre-analysis of the function.
468 2453 : __ SpillLocals();
469 :
470 : // Loop labels bind at the beginning of the block.
471 2453 : __ bind(loop->label.get());
472 :
473 : // Save the current cache state for the merge when jumping to this loop.
474 2453 : loop->label_state.Split(*__ cache_state());
475 :
476 : // Execute a stack check in the loop header.
477 2452 : StackCheck(decoder->position());
478 2454 : }
479 :
480 225 : void Try(FullDecoder* decoder, Control* block) {
481 : unsupported(decoder, "try");
482 225 : }
483 :
484 0 : void Catch(FullDecoder* decoder, Control* block, Value* exception) {
485 : unsupported(decoder, "catch");
486 0 : }
487 :
488 3391 : void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
489 : DCHECK_EQ(if_block, decoder->control_at(0));
490 : DCHECK(if_block->is_if());
491 :
492 3391 : if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
493 3423 : return unsupported(decoder, "multi-value if");
494 :
495 : // Allocate the else state.
496 6724 : if_block->else_state = base::make_unique<ElseState>();
497 :
498 : // Test the condition, jump to else if zero.
499 6729 : Register value = __ PopToRegister().gp();
500 : __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
501 3364 : value);
502 :
503 : // Store the state (after popping the value) for executing the else branch.
504 6718 : if_block->else_state->state.Split(*__ cache_state());
505 : }
506 :
507 0 : void FallThruTo(FullDecoder* decoder, Control* c) {
508 0 : if (c->end_merge.reached) {
509 0 : __ MergeFullStackWith(c->label_state, *__ cache_state());
510 : } else {
511 0 : c->label_state.Split(*__ cache_state());
512 : }
513 : TraceCacheState(decoder);
514 0 : }
515 :
516 1277 : void FinishOneArmedIf(FullDecoder* decoder, Control* c) {
517 : DCHECK(c->is_onearmed_if());
518 1277 : if (c->end_merge.reached) {
519 : // Someone already merged to the end of the if. Merge both arms into that.
520 1277 : if (c->reachable()) {
521 : // Merge the if state into the end state.
522 669 : __ MergeFullStackWith(c->label_state, *__ cache_state());
523 0 : __ emit_jump(c->label.get());
524 : }
525 : // Merge the else state into the end state.
526 60 : __ bind(c->else_state->label.get());
527 60 : __ MergeFullStackWith(c->label_state, c->else_state->state);
528 60 : __ cache_state()->Steal(c->label_state);
529 1217 : } else if (c->reachable()) {
530 : // No merge yet at the end of the if, but we need to create a merge for
531 : // both arms of this if. Thus init the merge point from the else
532 : // state, then merge the if state into that.
533 : DCHECK_EQ(0, c->end_merge.arity);
534 : c->label_state.InitMerge(c->else_state->state, __ num_locals(), 0,
535 1338 : c->stack_depth);
536 670 : __ MergeFullStackWith(c->label_state, *__ cache_state());
537 669 : __ emit_jump(c->label.get());
538 : // Merge the else state into the end state.
539 669 : __ bind(c->else_state->label.get());
540 670 : __ MergeFullStackWith(c->label_state, c->else_state->state);
541 670 : __ cache_state()->Steal(c->label_state);
542 : } else {
543 : // No merge needed, just continue with the else state.
544 548 : __ bind(c->else_state->label.get());
545 548 : __ cache_state()->Steal(c->else_state->state);
546 : }
547 1278 : }
548 :
549 158382 : void PopControl(FullDecoder* decoder, Control* c) {
550 454585 : if (c->is_loop()) return; // A loop just falls through.
551 156200 : if (c->is_onearmed_if()) {
552 : // Special handling for one-armed ifs.
553 1278 : FinishOneArmedIf(decoder, c);
554 154922 : } else if (c->end_merge.reached) {
555 : // There is a merge already. Merge our state into that, then continue with
556 : // that state.
557 137817 : if (c->reachable()) {
558 2596 : __ MergeFullStackWith(c->label_state, *__ cache_state());
559 : }
560 137819 : __ cache_state()->Steal(c->label_state);
561 : } else {
562 : // No merge, just continue with our current state.
563 : }
564 :
565 156201 : if (!c->label.get()->is_bound()) __ bind(c->label.get());
566 : }
567 :
568 : void EndControl(FullDecoder* decoder, Control* c) {}
569 :
570 : enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
571 :
572 33686 : void GenerateCCall(const LiftoffRegister* result_regs, FunctionSig* sig,
573 : ValueType out_argument_type,
574 : const LiftoffRegister* arg_regs,
575 : ExternalReference ext_ref) {
576 : // Before making a call, spill all cache registers.
577 33686 : __ SpillAllRegisters();
578 :
579 : // Store arguments on our stack, then align the stack for calling to C.
580 33686 : int param_bytes = 0;
581 101058 : for (ValueType param_type : sig->parameters()) {
582 67372 : param_bytes += ValueTypes::MemSize(param_type);
583 : }
584 : int out_arg_bytes = out_argument_type == kWasmStmt
585 : ? 0
586 33686 : : ValueTypes::MemSize(out_argument_type);
587 33686 : int stack_bytes = std::max(param_bytes, out_arg_bytes);
588 : __ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes,
589 33686 : ext_ref);
590 33686 : }
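// Note: GenerateCCall spills all cache registers, reserves
// max(argument bytes, out-argument bytes) of stack space, and lets CallC pass
// the arguments and read back the results. It is the common fallback used
// whenever no native instruction sequence is available.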
591 :
592 : template <ValueType src_type, ValueType result_type, class EmitFn>
593 121164 : void EmitUnOp(EmitFn fn) {
594 : static RegClass src_rc = reg_class_for(src_type);
595 : static RegClass result_rc = reg_class_for(result_type);
596 121164 : LiftoffRegister src = __ PopToRegister();
597 : LiftoffRegister dst = src_rc == result_rc
598 363495 : ? __ GetUnusedRegister(result_rc, {src})
599 242326 : : __ GetUnusedRegister(result_rc);
600 591 : fn(dst, src);
601 : __ PushRegister(result_type, dst);
602 121170 : }
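// Note: when source and result register classes match, the just-popped source
// register is offered to GetUnusedRegister as a reuse candidate, so unary ops
// can often operate in place.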
603 :
604 91 : void EmitI32UnOpWithCFallback(bool (LiftoffAssembler::*emit_fn)(Register,
605 : Register),
606 : ExternalReference (*fallback_fn)()) {
607 91 : auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
608 273 : if (emit_fn && (asm_.*emit_fn)(dst.gp(), src.gp())) return;
609 0 : ExternalReference ext_ref = fallback_fn();
610 0 : ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
611 : FunctionSig sig_i_i(1, 1, sig_i_i_reps);
612 0 : GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src, ext_ref);
613 91 : };
614 91 : EmitUnOp<kWasmI32, kWasmI32>(emit_with_c_fallback);
615 91 : }
616 :
617 : template <ValueType type>
618 181 : void EmitFloatUnOpWithCFallback(
619 : bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
620 : ExternalReference (*fallback_fn)()) {
621 182 : auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
622 546 : if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
623 0 : ExternalReference ext_ref = fallback_fn();
624 0 : ValueType sig_reps[] = {type};
625 : FunctionSig sig(0, 1, sig_reps);
626 0 : GenerateCCall(&dst, &sig, type, &src, ext_ref);
627 181 : };
628 181 : EmitUnOp<type, type>(emit_with_c_fallback);
629 182 : }
630 :
631 : enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
632 : template <ValueType dst_type, ValueType src_type,
633 : TypeConversionTrapping can_trap>
634 130774 : void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
635 : WasmCodePosition trap_position) {
636 : static constexpr RegClass src_rc = reg_class_for(src_type);
637 : static constexpr RegClass dst_rc = reg_class_for(dst_type);
638 130774 : LiftoffRegister src = __ PopToRegister();
639 28879 : LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
640 129572 : : __ GetUnusedRegister(dst_rc);
641 : DCHECK_EQ(!!can_trap, trap_position > 0);
642 : Label* trap = can_trap ? AddOutOfLineTrap(
643 : trap_position,
644 : WasmCode::kThrowWasmTrapFloatUnrepresentable)
645 1213 : : nullptr;
646 130791 : if (!__ emit_type_conversion(opcode, dst, src, trap)) {
647 : DCHECK_NOT_NULL(fallback_fn);
648 0 : ExternalReference ext_ref = fallback_fn();
649 : if (can_trap) {
650 : // External references for potentially trapping conversions return int.
651 0 : ValueType sig_reps[] = {kWasmI32, src_type};
652 : FunctionSig sig(1, 1, sig_reps);
653 : LiftoffRegister ret_reg =
654 : __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
655 0 : LiftoffRegister dst_regs[] = {ret_reg, dst};
656 0 : GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
657 0 : __ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
658 : } else {
659 0 : ValueType sig_reps[] = {src_type};
660 : FunctionSig sig(0, 1, sig_reps);
661 0 : GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
662 : }
663 : }
664 : __ PushRegister(dst_type, dst);
665 130784 : }
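// Note: if emit_type_conversion has no native sequence for the opcode, the C
// helper {fallback_fn} is called instead. For trapping conversions the helper
// returns an i32 status and writes the converted value into an out argument;
// a zero status branches to the kThrowWasmTrapFloatUnrepresentable trap.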
666 :
667 253327 : void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
668 : Value* result) {
669 : #define CASE_I32_UNOP(opcode, fn) \
670 : case WasmOpcode::kExpr##opcode: \
671 : EmitUnOp<kWasmI32, kWasmI32>( \
672 : [=](LiftoffRegister dst, LiftoffRegister src) { \
673 : __ emit_##fn(dst.gp(), src.gp()); \
674 : }); \
675 : break;
676 : #define CASE_I32_SIGN_EXTENSION(opcode, fn) \
677 : case WasmOpcode::kExpr##opcode: \
678 : EmitUnOp<kWasmI32, kWasmI32>( \
679 : [=](LiftoffRegister dst, LiftoffRegister src) { \
680 : __ emit_##fn(dst.gp(), src.gp()); \
681 : }); \
682 : break;
683 : #define CASE_I64_SIGN_EXTENSION(opcode, fn) \
684 : case WasmOpcode::kExpr##opcode: \
685 : EmitUnOp<kWasmI64, kWasmI64>( \
686 : [=](LiftoffRegister dst, LiftoffRegister src) { \
687 : __ emit_##fn(dst, src); \
688 : }); \
689 : break;
690 : #define CASE_FLOAT_UNOP(opcode, type, fn) \
691 : case WasmOpcode::kExpr##opcode: \
692 : EmitUnOp<kWasm##type, kWasm##type>( \
693 : [=](LiftoffRegister dst, LiftoffRegister src) { \
694 : __ emit_##fn(dst.fp(), src.fp()); \
695 : }); \
696 : break;
697 : #define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
698 : case WasmOpcode::kExpr##opcode: \
699 : EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \
700 : &ExternalReference::wasm_##fn); \
701 : break;
702 : #define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
703 : case WasmOpcode::kExpr##opcode: \
704 : EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>( \
705 : kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0); \
706 : break;
707 252122 : switch (opcode) {
708 236054 : CASE_I32_UNOP(I32Eqz, i32_eqz)
709 2640 : CASE_I32_UNOP(I32Clz, i32_clz)
710 822 : CASE_I32_UNOP(I32Ctz, i32_ctz)
711 76 : CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
712 512 : CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
713 23 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Ceil, F32, f32_ceil)
714 23 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Floor, F32, f32_floor)
715 23 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Trunc, F32, f32_trunc)
716 23 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F32NearestInt, F32, f32_nearest_int)
717 688 : CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
718 76 : CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
719 513 : CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
720 23 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Ceil, F64, f64_ceil)
721 23 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Floor, F64, f64_floor)
722 23 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Trunc, F64, f64_trunc)
723 23 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F64NearestInt, F64, f64_nearest_int)
724 580 : CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
725 916 : CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap)
726 135 : CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap)
727 50 : CASE_TYPE_CONVERSION(I32UConvertF32, I32, F32, nullptr, kCanTrap)
728 135 : CASE_TYPE_CONVERSION(I32SConvertF64, I32, F64, nullptr, kCanTrap)
729 41 : CASE_TYPE_CONVERSION(I32UConvertF64, I32, F64, nullptr, kCanTrap)
730 57286 : CASE_TYPE_CONVERSION(I32ReinterpretF32, I32, F32, nullptr, kNoTrap)
731 82 : CASE_TYPE_CONVERSION(I64SConvertI32, I64, I32, nullptr, kNoTrap)
732 13170 : CASE_TYPE_CONVERSION(I64UConvertI32, I64, I32, nullptr, kNoTrap)
733 41 : CASE_TYPE_CONVERSION(I64SConvertF32, I64, F32,
734 : &ExternalReference::wasm_float32_to_int64, kCanTrap)
735 41 : CASE_TYPE_CONVERSION(I64UConvertF32, I64, F32,
736 : &ExternalReference::wasm_float32_to_uint64, kCanTrap)
737 712 : CASE_TYPE_CONVERSION(I64SConvertF64, I64, F64,
738 : &ExternalReference::wasm_float64_to_int64, kCanTrap)
739 50 : CASE_TYPE_CONVERSION(I64UConvertF64, I64, F64,
740 : &ExternalReference::wasm_float64_to_uint64, kCanTrap)
741 56935 : CASE_TYPE_CONVERSION(I64ReinterpretF64, I64, F64, nullptr, kNoTrap)
742 82 : CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr, kNoTrap)
743 45 : CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr, kNoTrap)
744 50 : CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64,
745 : &ExternalReference::wasm_int64_to_float32, kNoTrap)
746 32 : CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64,
747 : &ExternalReference::wasm_uint64_to_float32, kNoTrap)
748 98 : CASE_TYPE_CONVERSION(F32ConvertF64, F32, F64, nullptr, kNoTrap)
749 149 : CASE_TYPE_CONVERSION(F32ReinterpretI32, F32, I32, nullptr, kNoTrap)
750 68 : CASE_TYPE_CONVERSION(F64SConvertI32, F64, I32, nullptr, kNoTrap)
751 73 : CASE_TYPE_CONVERSION(F64UConvertI32, F64, I32, nullptr, kNoTrap)
752 73 : CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64,
753 : &ExternalReference::wasm_int64_to_float64, kNoTrap)
754 149 : CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64,
755 : &ExternalReference::wasm_uint64_to_float64, kNoTrap)
756 171 : CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr, kNoTrap)
757 194 : CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr, kNoTrap)
758 28 : CASE_I32_SIGN_EXTENSION(I32SExtendI8, i32_signextend_i8)
759 10 : CASE_I32_SIGN_EXTENSION(I32SExtendI16, i32_signextend_i16)
760 28 : CASE_I64_SIGN_EXTENSION(I64SExtendI8, i64_signextend_i8)
761 10 : CASE_I64_SIGN_EXTENSION(I64SExtendI16, i64_signextend_i16)
762 10 : CASE_I64_SIGN_EXTENSION(I64SExtendI32, i64_signextend_i32)
763 : case kExprI32Popcnt:
764 : EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
765 91 : &ExternalReference::wasm_word32_popcnt);
766 : break;
767 : case WasmOpcode::kExprI64Eqz:
768 : EmitUnOp<kWasmI64, kWasmI32>(
769 : [=](LiftoffRegister dst, LiftoffRegister src) {
770 186 : __ emit_i64_eqz(dst.gp(), src);
771 186 : });
772 : break;
773 : default:
774 252304 : return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
775 : }
776 : #undef CASE_I32_UNOP
777 : #undef CASE_I32_SIGN_EXTENSION
778 : #undef CASE_I64_SIGN_EXTENSION
779 : #undef CASE_FLOAT_UNOP
780 : #undef CASE_FLOAT_UNOP_WITH_CFALLBACK
781 : #undef CASE_TYPE_CONVERSION
782 : }
783 :
784 : template <ValueType src_type, ValueType result_type, typename EmitFn>
785 901905 : void EmitBinOp(EmitFn fn) {
786 : static constexpr RegClass src_rc = reg_class_for(src_type);
787 : static constexpr RegClass result_rc = reg_class_for(result_type);
788 901905 : LiftoffRegister rhs = __ PopToRegister();
789 901918 : LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
790 : LiftoffRegister dst = src_rc == result_rc
791 1801168 : ? __ GetUnusedRegister(result_rc, {lhs, rhs})
792 900590 : : __ GetUnusedRegister(result_rc);
793 166284 : fn(dst, lhs, rhs);
794 : __ PushRegister(result_type, dst);
795 901921 : }
796 :
797 : void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
798 : LiftoffRegister rhs, ExternalReference ext_ref,
799 : Label* trap_by_zero,
800 : Label* trap_unrepresentable = nullptr) {
801 : // Cannot emit native instructions, build C call.
802 : LiftoffRegister ret =
803 : __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
804 : LiftoffRegister tmp =
805 : __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
806 : LiftoffRegister arg_regs[] = {lhs, rhs};
807 : LiftoffRegister result_regs[] = {ret, dst};
808 : ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64};
809 : // <i64, i64> -> i32 (with i64 output argument)
810 : FunctionSig sig(1, 2, sig_types);
811 : GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref);
812 : __ LoadConstant(tmp, WasmValue(int32_t{0}));
813 : __ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp());
814 : if (trap_unrepresentable) {
815 : __ LoadConstant(tmp, WasmValue(int32_t{-1}));
816 : __ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(),
817 : tmp.gp());
818 : }
819 : }
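// Note: the C helper receives both i64 operands and returns an i32 status plus
// the i64 result as an out argument. A status of 0 signals division by zero
// and -1 signals an unrepresentable result (INT64_MIN / -1); each case
// branches to the corresponding trap label.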
820 :
821 902101 : void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
822 : const Value& rhs, Value* result) {
823 : #define CASE_I32_BINOP(opcode, fn) \
824 : case WasmOpcode::kExpr##opcode: \
825 : return EmitBinOp<kWasmI32, kWasmI32>( \
826 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
827 : __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
828 : });
829 : #define CASE_I64_BINOP(opcode, fn) \
830 : case WasmOpcode::kExpr##opcode: \
831 : return EmitBinOp<kWasmI64, kWasmI64>( \
832 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
833 : __ emit_##fn(dst, lhs, rhs); \
834 : });
835 : #define CASE_FLOAT_BINOP(opcode, type, fn) \
836 : case WasmOpcode::kExpr##opcode: \
837 : return EmitBinOp<kWasm##type, kWasm##type>( \
838 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
839 : __ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
840 : });
841 : #define CASE_I32_CMPOP(opcode, cond) \
842 : case WasmOpcode::kExpr##opcode: \
843 : return EmitBinOp<kWasmI32, kWasmI32>( \
844 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
845 : __ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
846 : });
847 : #define CASE_I64_CMPOP(opcode, cond) \
848 : case WasmOpcode::kExpr##opcode: \
849 : return EmitBinOp<kWasmI64, kWasmI32>( \
850 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
851 : __ emit_i64_set_cond(cond, dst.gp(), lhs, rhs); \
852 : });
853 : #define CASE_F32_CMPOP(opcode, cond) \
854 : case WasmOpcode::kExpr##opcode: \
855 : return EmitBinOp<kWasmF32, kWasmI32>( \
856 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
857 : __ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
858 : });
859 : #define CASE_F64_CMPOP(opcode, cond) \
860 : case WasmOpcode::kExpr##opcode: \
861 : return EmitBinOp<kWasmF64, kWasmI32>( \
862 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
863 : __ emit_f64_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
864 : });
865 : #define CASE_I32_SHIFTOP(opcode, fn) \
866 : case WasmOpcode::kExpr##opcode: \
867 : return EmitBinOp<kWasmI32, kWasmI32>( \
868 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
869 : __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp(), {}); \
870 : });
871 : #define CASE_I64_SHIFTOP(opcode, fn) \
872 : case WasmOpcode::kExpr##opcode: \
873 : return EmitBinOp<kWasmI64, kWasmI64>([=](LiftoffRegister dst, \
874 : LiftoffRegister src, \
875 : LiftoffRegister amount) { \
876 : __ emit_##fn(dst, src, amount.is_pair() ? amount.low_gp() : amount.gp(), \
877 : {}); \
878 : });
879 : #define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
880 : case WasmOpcode::kExpr##opcode: \
881 : return EmitBinOp<kWasmI32, kWasmI32>( \
882 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
883 : LiftoffRegister args[] = {lhs, rhs}; \
884 : auto ext_ref = ExternalReference::ext_ref_fn(); \
885 : ValueType sig_i_ii_reps[] = {kWasmI32, kWasmI32, kWasmI32}; \
886 : FunctionSig sig_i_ii(1, 2, sig_i_ii_reps); \
887 : GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref); \
888 : });
889 902101 : switch (opcode) {
890 629251 : CASE_I32_BINOP(I32Add, i32_add)
891 57943 : CASE_I32_BINOP(I32Sub, i32_sub)
892 35892 : CASE_I32_BINOP(I32Mul, i32_mul)
893 51476 : CASE_I32_BINOP(I32And, i32_and)
894 33988 : CASE_I32_BINOP(I32Ior, i32_or)
895 33686 : CASE_I32_BINOP(I32Xor, i32_xor)
896 17274 : CASE_I64_BINOP(I64And, i64_and)
897 13197 : CASE_I64_BINOP(I64Ior, i64_or)
898 126 : CASE_I64_BINOP(I64Xor, i64_xor)
899 192306 : CASE_I32_CMPOP(I32Eq, kEqual)
900 33920 : CASE_I32_CMPOP(I32Ne, kUnequal)
901 33744 : CASE_I32_CMPOP(I32LtS, kSignedLessThan)
902 33866 : CASE_I32_CMPOP(I32LtU, kUnsignedLessThan)
903 33722 : CASE_I32_CMPOP(I32GtS, kSignedGreaterThan)
904 33722 : CASE_I32_CMPOP(I32GtU, kUnsignedGreaterThan)
905 33776 : CASE_I32_CMPOP(I32LeS, kSignedLessEqual)
906 33776 : CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
907 33704 : CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
908 33750 : CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
909 1676 : CASE_I64_BINOP(I64Add, i64_add)
910 1910 : CASE_I64_BINOP(I64Sub, i64_sub)
911 1884 : CASE_I64_BINOP(I64Mul, i64_mul)
912 77762 : CASE_I64_CMPOP(I64Eq, kEqual)
913 66 : CASE_I64_CMPOP(I64Ne, kUnequal)
914 138 : CASE_I64_CMPOP(I64LtS, kSignedLessThan)
915 84 : CASE_I64_CMPOP(I64LtU, kUnsignedLessThan)
916 84 : CASE_I64_CMPOP(I64GtS, kSignedGreaterThan)
917 84 : CASE_I64_CMPOP(I64GtU, kUnsignedGreaterThan)
918 66 : CASE_I64_CMPOP(I64LeS, kSignedLessEqual)
919 102 : CASE_I64_CMPOP(I64LeU, kUnsignedLessEqual)
920 66 : CASE_I64_CMPOP(I64GeS, kSignedGreaterEqual)
921 66 : CASE_I64_CMPOP(I64GeU, kUnsignedGreaterEqual)
922 616 : CASE_F32_CMPOP(F32Eq, kEqual)
923 184 : CASE_F32_CMPOP(F32Ne, kUnequal)
924 508 : CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
925 544 : CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
926 508 : CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
927 472 : CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
928 364 : CASE_F64_CMPOP(F64Eq, kEqual)
929 184 : CASE_F64_CMPOP(F64Ne, kUnequal)
930 544 : CASE_F64_CMPOP(F64Lt, kUnsignedLessThan)
931 436 : CASE_F64_CMPOP(F64Gt, kUnsignedGreaterThan)
932 508 : CASE_F64_CMPOP(F64Le, kUnsignedLessEqual)
933 436 : CASE_F64_CMPOP(F64Ge, kUnsignedGreaterEqual)
934 70460 : CASE_I32_SHIFTOP(I32Shl, i32_shl)
935 70352 : CASE_I32_SHIFTOP(I32ShrS, i32_sar)
936 70316 : CASE_I32_SHIFTOP(I32ShrU, i32_shr)
937 22094 : CASE_I64_SHIFTOP(I64Shl, i64_shl)
938 2328 : CASE_I64_SHIFTOP(I64ShrS, i64_sar)
939 2787 : CASE_I64_SHIFTOP(I64ShrU, i64_shr)
940 50556 : CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
941 50502 : CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
942 1532 : CASE_FLOAT_BINOP(F32Add, F32, f32_add)
943 1221 : CASE_FLOAT_BINOP(F32Sub, F32, f32_sub)
944 1515 : CASE_FLOAT_BINOP(F32Mul, F32, f32_mul)
945 1287 : CASE_FLOAT_BINOP(F32Div, F32, f32_div)
946 148 : CASE_FLOAT_BINOP(F32Min, F32, f32_min)
947 148 : CASE_FLOAT_BINOP(F32Max, F32, f32_max)
948 96 : CASE_FLOAT_BINOP(F32CopySign, F32, f32_copysign)
949 2826 : CASE_FLOAT_BINOP(F64Add, F64, f64_add)
950 1233 : CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
951 1905 : CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
952 1489 : CASE_FLOAT_BINOP(F64Div, F64, f64_div)
953 184 : CASE_FLOAT_BINOP(F64Min, F64, f64_min)
954 148 : CASE_FLOAT_BINOP(F64Max, F64, f64_max)
955 96 : CASE_FLOAT_BINOP(F64CopySign, F64, f64_copysign)
956 : case WasmOpcode::kExprI32DivS:
957 : EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
958 : LiftoffRegister lhs,
959 17721 : LiftoffRegister rhs) {
960 17721 : WasmCodePosition position = decoder->position();
961 17721 : AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
962 : // Adding the second trap might invalidate the pointer returned for
963 : // the first one, thus get both pointers afterwards.
964 : AddOutOfLineTrap(position,
965 17721 : WasmCode::kThrowWasmTrapDivUnrepresentable);
966 35442 : Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
967 17721 : Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
968 : __ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
969 17721 : div_unrepresentable);
970 35442 : });
971 : break;
972 : case WasmOpcode::kExprI32DivU:
973 : EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
974 : LiftoffRegister lhs,
975 17692 : LiftoffRegister rhs) {
976 : Label* div_by_zero = AddOutOfLineTrap(
977 35384 : decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
978 17692 : __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
979 35384 : });
980 : break;
981 : case WasmOpcode::kExprI32RemS:
982 : EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
983 : LiftoffRegister lhs,
984 17621 : LiftoffRegister rhs) {
985 : Label* rem_by_zero = AddOutOfLineTrap(
986 35242 : decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
987 17621 : __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
988 35242 : });
989 : break;
990 : case WasmOpcode::kExprI32RemU:
991 : EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
992 : LiftoffRegister lhs,
993 17620 : LiftoffRegister rhs) {
994 : Label* rem_by_zero = AddOutOfLineTrap(
995 35240 : decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
996 17620 : __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
997 35240 : });
998 : break;
999 : case WasmOpcode::kExprI64DivS:
1000 : EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
1001 : LiftoffRegister lhs,
1002 883 : LiftoffRegister rhs) {
1003 883 : WasmCodePosition position = decoder->position();
1004 883 : AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
1005 : // Adding the second trap might invalidate the pointer returned for
1006 : // the first one, thus get both pointers afterwards.
1007 : AddOutOfLineTrap(position,
1008 883 : WasmCode::kThrowWasmTrapDivUnrepresentable);
1009 1766 : Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
1010 883 : Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
1011 : if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
1012 883 : div_unrepresentable)) {
1013 : ExternalReference ext_ref = ExternalReference::wasm_int64_div();
1014 : EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero,
1015 : div_unrepresentable);
1016 : }
1017 1766 : });
1018 : break;
1019 : case WasmOpcode::kExprI64DivU:
1020 : EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
1021 : LiftoffRegister lhs,
1022 810 : LiftoffRegister rhs) {
1023 : Label* div_by_zero = AddOutOfLineTrap(
1024 1620 : decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
1025 810 : if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
1026 : ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
1027 : EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
1028 : }
1029 1620 : });
1030 : break;
1031 : case WasmOpcode::kExprI64RemS:
1032 : EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
1033 : LiftoffRegister lhs,
1034 792 : LiftoffRegister rhs) {
1035 : Label* rem_by_zero = AddOutOfLineTrap(
1036 1584 : decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
1037 792 : if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
1038 : ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
1039 : EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
1040 : }
1041 1584 : });
1042 : break;
1043 : case WasmOpcode::kExprI64RemU:
1044 : EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
1045 : LiftoffRegister lhs,
1046 792 : LiftoffRegister rhs) {
1047 : Label* rem_by_zero = AddOutOfLineTrap(
1048 1584 : decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
1049 792 : if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
1050 : ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
1051 : EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
1052 : }
1053 1584 : });
1054 : break;
1055 : default:
1056 193 : return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
1057 : }
1058 : #undef CASE_I32_BINOP
1059 : #undef CASE_I64_BINOP
1060 : #undef CASE_FLOAT_BINOP
1061 : #undef CASE_I32_CMPOP
1062 : #undef CASE_I64_CMPOP
1063 : #undef CASE_F32_CMPOP
1064 : #undef CASE_F64_CMPOP
1065 : #undef CASE_I32_SHIFTOP
1066 : #undef CASE_I64_SHIFTOP
1067 : #undef CASE_CCALL_BINOP
1068 : }
1069 :
1070 : void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
1071 1571675 : __ cache_state()->stack_state.emplace_back(kWasmI32, value);
1072 : }
1073 :
1074 46316 : void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
1075 : // The {VarState} stores constant values as int32_t, thus we only store
1076 : // 64-bit constants in this field if they fit in an int32_t. Larger values
1077 : // cannot be used as an immediate value anyway, so we can just put them in
1078 : // a register immediately.
1079 46316 : int32_t value_i32 = static_cast<int32_t>(value);
1080 46316 : if (value_i32 == value) {
1081 21970 : __ cache_state()->stack_state.emplace_back(kWasmI64, value_i32);
1082 : } else {
1083 24346 : LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
1084 24346 : __ LoadConstant(reg, WasmValue(value));
1085 : __ PushRegister(kWasmI64, reg);
1086 : }
1087 46321 : }
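// Example: i64.const 5 stays on the value stack as a KIntConst slot, whereas
// i64.const 0x100000000 does not fit into an int32_t and is materialized into
// a register immediately.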
1088 :
1089 132940 : void F32Const(FullDecoder* decoder, Value* result, float value) {
1090 132940 : LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
1091 132940 : __ LoadConstant(reg, WasmValue(value));
1092 : __ PushRegister(kWasmF32, reg);
1093 132941 : }
1094 :
1095 133985 : void F64Const(FullDecoder* decoder, Value* result, double value) {
1096 133985 : LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
1097 133986 : __ LoadConstant(reg, WasmValue(value));
1098 : __ PushRegister(kWasmF64, reg);
1099 133985 : }
1100 :
1101 36 : void RefNull(FullDecoder* decoder, Value* result) {
1102 : unsupported(decoder, "ref_null");
1103 36 : }
1104 :
1105 3293 : void Drop(FullDecoder* decoder, const Value& value) {
1106 3293 : auto& slot = __ cache_state()->stack_state.back();
1107 : // If the dropped slot contains a register, decrement its use count.
1108 3293 : if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
1109 : __ cache_state()->stack_state.pop_back();
1110 3293 : }
1111 :
1112 694887 : void ReturnImpl(FullDecoder* decoder) {
1113 694887 : size_t num_returns = decoder->sig_->return_count();
1114 1389789 : if (num_returns > 1) return unsupported(decoder, "multi-return");
1115 694192 : if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
1116 694201 : __ LeaveFrame(StackFrame::WASM_COMPILED);
1117 : __ DropStackSlotsAndRet(
1118 694207 : static_cast<uint32_t>(descriptor_->StackParameterCount()));
1119 : }
1120 :
1121 : void DoReturn(FullDecoder* decoder, Vector<Value> /*values*/) {
1122 694747 : ReturnImpl(decoder);
1123 : }
1124 :
1125 289981 : void GetLocal(FullDecoder* decoder, Value* result,
1126 : const LocalIndexImmediate<validate>& imm) {
1127 869952 : auto& slot = __ cache_state()->stack_state[imm.index];
1128 : DCHECK_EQ(slot.type(), imm.type);
1129 289981 : switch (slot.loc()) {
1130 : case kRegister:
1131 : __ PushRegister(slot.type(), slot.reg());
1132 : break;
1133 : case KIntConst:
1134 476 : __ cache_state()->stack_state.emplace_back(imm.type, slot.i32_const());
1135 : break;
1136 : case kStack: {
1137 85534 : auto rc = reg_class_for(imm.type);
1138 85534 : LiftoffRegister reg = __ GetUnusedRegister(rc);
1139 85534 : __ Fill(reg, imm.index, imm.type);
1140 : __ PushRegister(slot.type(), reg);
1141 : break;
1142 : }
1143 : }
1144 289972 : }
1145 :
1146 0 : void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
1147 : uint32_t local_index) {
1148 : auto& state = *__ cache_state();
1149 : ValueType type = dst_slot.type();
1150 0 : if (dst_slot.is_reg()) {
1151 : LiftoffRegister slot_reg = dst_slot.reg();
1152 0 : if (state.get_use_count(slot_reg) == 1) {
1153 0 : __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
1154 0 : return;
1155 : }
1156 : state.dec_used(slot_reg);
1157 : dst_slot.MakeStack();
1158 : }
1159 : DCHECK_EQ(type, __ local_type(local_index));
1160 : RegClass rc = reg_class_for(type);
1161 0 : LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
1162 0 : __ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
1163 0 : dst_slot = LiftoffAssembler::VarState(type, dst_reg);
1164 : __ cache_state()->inc_used(dst_reg);
1165 : }
1166 :
1167 94084 : void SetLocal(uint32_t local_index, bool is_tee) {
1168 : auto& state = *__ cache_state();
1169 282252 : auto& source_slot = state.stack_state.back();
1170 188170 : auto& target_slot = state.stack_state[local_index];
1171 94084 : switch (source_slot.loc()) {
1172 : case kRegister:
1173 92605 : if (target_slot.is_reg()) state.dec_used(target_slot.reg());
1174 92605 : target_slot = source_slot;
1175 92605 : if (is_tee) state.inc_used(target_slot.reg());
1176 : break;
1177 : case KIntConst:
1178 1481 : if (target_slot.is_reg()) state.dec_used(target_slot.reg());
1179 1481 : target_slot = source_slot;
1180 1481 : break;
1181 : case kStack:
1182 0 : SetLocalFromStackSlot(target_slot, local_index);
1183 0 : break;
1184 : }
1185 94084 : if (!is_tee) __ cache_state()->stack_state.pop_back();
1186 94084 : }
1187 :
1188 : void SetLocal(FullDecoder* decoder, const Value& value,
1189 : const LocalIndexImmediate<validate>& imm) {
1190 92884 : SetLocal(imm.index, false);
1191 : }
1192 :
1193 : void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
1194 : const LocalIndexImmediate<validate>& imm) {
1195 1202 : SetLocal(imm.index, true);
1196 : }
1197 :
1198 1751 : Register GetGlobalBaseAndOffset(const WasmGlobal* global,
1199 : LiftoffRegList& pinned, uint32_t* offset) {
1200 1751 : Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
1201 1751 : if (global->mutability && global->imported) {
1202 189 : LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
1203 : __ Load(LiftoffRegister(addr), addr, no_reg,
1204 378 : global->index * sizeof(Address), kPointerLoadType, pinned);
1205 189 : *offset = 0;
1206 : } else {
1207 1562 : LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
1208 1563 : *offset = global->offset;
1209 : }
1210 1752 : return addr;
1211 : }
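// Note: imported mutable globals are reached indirectly (the address is loaded
// from the ImportedMutableGlobals array and *offset stays 0), while all other
// globals live at a fixed offset from the instance's GlobalsStart area.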
1212 :
1213 1434 : void GetGlobal(FullDecoder* decoder, Value* result,
1214 : const GlobalIndexImmediate<validate>& imm) {
1215 1434 : const auto* global = &env_->module->globals[imm.index];
1216 1434 : if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
1217 162 : return;
1218 1272 : LiftoffRegList pinned;
1219 1272 : uint32_t offset = 0;
1220 1272 : Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
1221 : LiftoffRegister value =
1222 2546 : pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
1223 1272 : LoadType type = LoadType::ForValueType(global->type);
1224 1272 : __ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
1225 1271 : __ PushRegister(global->type, value);
1226 : }
1227 :
1228 479 : void SetGlobal(FullDecoder* decoder, const Value& value,
1229 : const GlobalIndexImmediate<validate>& imm) {
1230 479 : auto* global = &env_->module->globals[imm.index];
1231 479 : if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
1232 0 : return;
1233 479 : LiftoffRegList pinned;
1234 479 : uint32_t offset = 0;
1235 479 : Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
1236 958 : LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
1237 479 : StoreType type = StoreType::ForValueType(global->type);
1238 479 : __ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
1239 : }
1240 :
1241 119307 : void Unreachable(FullDecoder* decoder) {
1242 : Label* unreachable_label = AddOutOfLineTrap(
1243 119307 : decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
1244 : __ emit_jump(unreachable_label);
1245 : __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
1246 119307 : }
1247 :
1248 954 : void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
1249 : const Value& tval, Value* result) {
1250 : LiftoffRegList pinned;
1251 1908 : Register condition = pinned.set(__ PopToRegister()).gp();
1252 954 : ValueType type = __ cache_state()->stack_state.end()[-1].type();
1253 : DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
1254 954 : LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
1255 956 : LiftoffRegister true_value = __ PopToRegister(pinned);
1256 : LiftoffRegister dst =
1257 1918 : __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value});
1258 : __ PushRegister(type, dst);
1259 :
1260 : // Now emit the actual code to move either {true_value} or {false_value}
1261 : // into {dst}.
1262 959 : Label cont;
1263 959 : Label case_false;
1264 959 : __ emit_cond_jump(kEqual, &case_false, kWasmI32, condition);
1265 955 : if (dst != true_value) __ Move(dst, true_value, type);
1266 : __ emit_jump(&cont);
1267 :
1268 957 : __ bind(&case_false);
1269 957 : if (dst != false_value) __ Move(dst, false_value, type);
1270 958 : __ bind(&cont);
1271 958 : }
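// Note: Select is lowered to a comparison of the condition against zero plus
// two guarded moves; the true value is moved into {dst} if the condition is
// non-zero, otherwise the false value is.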
1272 :
1273 137983 : void BrImpl(Control* target) {
1274 137983 : if (!target->br_merge()->reached) {
1275 136757 : target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
1276 : target->br_merge()->arity,
1277 410271 : target->stack_depth);
1278 : }
1279 137986 : __ MergeStackWith(target->label_state, target->br_merge()->arity);
1280 137987 : __ jmp(target->label.get());
1281 137984 : }
1282 :
1283 2669 : void Br(FullDecoder* decoder, Control* target) { BrImpl(target); }
1284 :
1285 135511 : void BrOrRet(FullDecoder* decoder, uint32_t depth) {
1286 135511 : if (depth == decoder->control_depth() - 1) {
1287 196 : ReturnImpl(decoder);
1288 : } else {
1289 135315 : BrImpl(decoder->control_at(depth));
1290 : }
1291 135512 : }
1292 :
1293 119176 : void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
1294 119176 : Label cont_false;
1295 238353 : Register value = __ PopToRegister().gp();
1296 119177 : __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
1297 :
1298 119179 : BrOrRet(decoder, depth);
1299 119178 : __ bind(&cont_false);
1300 119178 : }
1301 :
1302 : // Generate a branch table case, potentially reusing previously generated
1303 : // stack transfer code.
1304 245112 : void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
1305 : std::map<uint32_t, MovableLabel>& br_targets) {
1306 245112 : MovableLabel& label = br_targets[br_depth];
1307 245126 : if (label.get()->is_bound()) {
1308 228791 : __ jmp(label.get());
1309 : } else {
1310 16335 : __ bind(label.get());
1311 16334 : BrOrRet(decoder, br_depth);
1312 : }
1313 245124 : }
1314 :
1315 : // Generate a branch table for input in [min, max).
1316 : // TODO(wasm): Generate a real branch table (like TF TableSwitch).
1317 471260 : void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
1318 : LiftoffRegister value, uint32_t min, uint32_t max,
1319 : BranchTableIterator<validate>& table_iterator,
1320 : std::map<uint32_t, MovableLabel>& br_targets) {
1321 : DCHECK_LT(min, max);
1322 : // Check base case.
1323 471260 : if (max == min + 1) {
1324 : DCHECK_EQ(min, table_iterator.cur_index());
1325 238622 : GenerateBrCase(decoder, table_iterator.next(), br_targets);
1326 709888 : return;
1327 : }
1328 :
1329 232638 : uint32_t split = min + (max - min) / 2;
1330 232638 : Label upper_half;
1331 232638 : __ LoadConstant(tmp, WasmValue(split));
1332 : __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
1333 232639 : tmp.gp());
1334 : // Emit br table for lower half:
1335 : GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
1336 232638 : br_targets);
1337 232637 : __ bind(&upper_half);
1338 : // Emit br table for upper half:
1339 : GenerateBrTable(decoder, tmp, value, split, max, table_iterator,
1340 232636 : br_targets);
1341 : }
1342 :
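  // Illustrative sketch (not part of the original source): the binary-search
  // dispatch that GenerateBrTable() emits, expressed as a plain recursive
  // lookup over an array of branch depths (hypothetical name, no assembler
  // involved). Each leaf corresponds to one GenerateBrCase() call; labels for
  // equal depths are shared via the {br_targets} map above.
  static uint32_t BrTableLookupSketch(const uint32_t* depths, uint32_t min,
                                      uint32_t max, uint32_t key) {
    if (max == min + 1) return depths[min];  // base case -> GenerateBrCase
    uint32_t split = min + (max - min) / 2;
    if (key >= split) {                      // kUnsignedGreaterEqual -> upper_half
      return BrTableLookupSketch(depths, split, max, key);
    }
    return BrTableLookupSketch(depths, min, split, key);
  }
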
1343 6499 : void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
1344 : const Value& key) {
1345 : LiftoffRegList pinned;
1346 12999 : LiftoffRegister value = pinned.set(__ PopToRegister());
1347 : BranchTableIterator<validate> table_iterator(decoder, imm);
1348 : std::map<uint32_t, MovableLabel> br_targets;
1349 :
1350 6500 : if (imm.table_count > 0) {
1351 : LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
1352 11982 : __ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
1353 5990 : Label case_default;
1354 : __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
1355 5990 : value.gp(), tmp.gp());
1356 :
1357 : GenerateBrTable(decoder, tmp, value, 0, imm.table_count, table_iterator,
1358 5990 : br_targets);
1359 :
1360 5989 : __ bind(&case_default);
1361 : }
1362 :
1363 : // Generate the default case.
1364 6500 : GenerateBrCase(decoder, table_iterator.next(), br_targets);
1365 : DCHECK(!table_iterator.has_next());
1366 6501 : }
1367 :
1368 1786 : void Else(FullDecoder* decoder, Control* c) {
1369 1786 : if (c->reachable()) {
1370 1498 : if (!c->end_merge.reached) {
1371 1492 : c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
1372 2984 : c->end_merge.arity, c->stack_depth);
1373 : }
1374 1499 : __ MergeFullStackWith(c->label_state, *__ cache_state());
1375 1497 : __ emit_jump(c->label.get());
1376 : }
1377 1784 : __ bind(c->else_state->label.get());
1378 1783 : __ cache_state()->Steal(c->else_state->state);
1379 1784 : }
1380 :
1381 458036 : Label* AddOutOfLineTrap(WasmCodePosition position,
1382 : WasmCode::RuntimeStubId stub, uint32_t pc = 0) {
1383 : DCHECK(!FLAG_wasm_no_bounds_checks);
1384             : // The pc is only needed for the memory out-of-bounds trap when the trap
1385             : // handler is enabled. Other callers should not even compute it.
1386 : DCHECK_EQ(pc != 0, stub == WasmCode::kThrowWasmTrapMemOutOfBounds &&
1387 : env_->use_trap_handler);
1388 :
1389 916140 : out_of_line_code_.push_back(OutOfLineCode::Trap(stub, position, pc));
1390 458104 : return out_of_line_code_.back().label.get();
1391 : }
1392 :
1393             : // Returns true if the memory access is statically known to be out of bounds
1394             : // (in which case a jump to the trap was already emitted); returns false otherwise.
1395 240476 : bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
1396 : uint32_t offset, Register index, LiftoffRegList pinned) {
1397 : const bool statically_oob =
1398 238641 : !IsInBounds(offset, access_size, env_->max_memory_size);
1399 :
1400 238641 : if (!statically_oob &&
1401 236869 : (FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
1402 : return false;
1403 : }
1404 :
1405 : // TODO(wasm): This adds protected instruction information for the jump
1406 : // instruction we are about to generate. It would be better to just not add
1407 : // protected instruction info when the pc is 0.
1408 : Label* trap_label = AddOutOfLineTrap(
1409 : decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds,
1410 5460 : env_->use_trap_handler ? __ pc_offset() : 0);
1411 :
1412 1834 : if (statically_oob) {
1413 : __ emit_jump(trap_label);
1414 : Control* current_block = decoder->control_at(0);
1415 1786 : if (current_block->reachable()) {
1416 1786 : current_block->reachability = kSpecOnlyReachable;
1417 : }
1418 : return true;
1419 : }
1420 :
1421 : DCHECK(!env_->use_trap_handler);
1422 : DCHECK(!FLAG_wasm_no_bounds_checks);
1423 :
1424 36 : uint64_t end_offset = uint64_t{offset} + access_size - 1u;
1425 :
1426 : // If the end offset is larger than the smallest memory, dynamically check
1427 : // the end offset against the actual memory size, which is not known at
1428 : // compile time. Otherwise, only one check is required (see below).
1429 : LiftoffRegister end_offset_reg =
1430 36 : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1431 36 : Register mem_size = __ GetUnusedRegister(kGpReg, pinned).gp();
1432 36 : LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
1433 :
1434 : if (kSystemPointerSize == 8) {
1435 36 : __ LoadConstant(end_offset_reg, WasmValue(end_offset));
1436 : } else {
1437 : __ LoadConstant(end_offset_reg,
1438 : WasmValue(static_cast<uint32_t>(end_offset)));
1439 : }
1440 :
1441 36 : if (end_offset >= env_->min_memory_size) {
1442 : __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
1443 : LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
1444 17 : mem_size);
1445 : }
1446 :
1447 : // Just reuse the end_offset register for computing the effective size.
1448 : LiftoffRegister effective_size_reg = end_offset_reg;
1449 36 : __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size, end_offset_reg.gp());
1450 :
1451 : __ emit_i32_to_intptr(index, index);
1452 :
1453 : __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
1454 : LiftoffAssembler::kWasmIntPtr, index,
1455 36 : effective_size_reg.gp());
1456 36 : return false;
1457 : }
1458 :
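  // Illustrative sketch (not part of the original source) of the arithmetic
  // behind the explicit bounds check above, for the non-trap-handler path.
  // The first comparison is constant-folded at compile time in the real code
  // (it is only emitted when {end_offset} can exceed the declared minimum
  // memory size); uint64_t avoids overflow exactly as the code above does.
  static bool AccessIsOutOfBoundsSketch(uint64_t offset, uint64_t access_size,
                                        uint64_t index, uint64_t mem_size,
                                        uint64_t min_memory_size) {
    uint64_t end_offset = offset + access_size - 1;
    // Check 1: the last accessed byte must exist in the current memory.
    if (end_offset >= min_memory_size && end_offset >= mem_size) return true;
    // Check 2: the dynamic index must fit below the "effective size".
    uint64_t effective_size = mem_size - end_offset;
    return index >= effective_size;
  }
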
1459 50 : void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
1460 : Register index, uint32_t offset,
1461 : WasmCodePosition position) {
1462 : // Before making the runtime call, spill all cache registers.
1463 50 : __ SpillAllRegisters();
1464 :
1465 : LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
1466 : // Get one register for computing the address (offset + index).
1467 : LiftoffRegister address = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1468 : // Compute offset+index in address.
1469 50 : __ LoadConstant(address, WasmValue(offset));
1470 50 : __ emit_i32_add(address.gp(), address.gp(), index);
1471 :
1472 : // Get a register to hold the stack slot for MemoryTracingInfo.
1473 : LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1474 : // Allocate stack slot for MemoryTracingInfo.
1475 50 : __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
1476 :
1477 : // Now store all information into the MemoryTracingInfo struct.
1478 : __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
1479 50 : StoreType::kI32Store, pinned);
1480 100 : __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
1481 : __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
1482 50 : StoreType::kI32Store8, pinned);
1483 100 : __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
1484 : __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
1485 50 : StoreType::kI32Store8, pinned);
1486 :
1487 50 : source_position_table_builder_.AddPosition(__ pc_offset(),
1488 50 : SourcePosition(position), false);
1489 :
1490 50 : Register args[] = {info.gp()};
1491 50 : GenerateRuntimeCall(Runtime::kWasmTraceMemory, arraysize(args), args);
1492 : __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
1493 50 : }
1494 :
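  // Note (editorial, not part of the original source): the three Store()s
  // above fill a stack-allocated MemoryTracingInfo one field at a time, at
  // offsetof(..., address), offsetof(..., is_store) and offsetof(..., mem_rep)
  // respectively, i.e. roughly the following layout (sketch, field types
  // inferred from the store widths used above):
  //   struct /* MemoryTracingInfo-like */ {
  //     uint32_t address;  // offset + index, stored with kI32Store
  //     uint8_t is_store;  // 0 = load, 1 = store, stored with kI32Store8
  //     uint8_t mem_rep;   // MachineRepresentation, stored with kI32Store8
  //   };
  // A pointer to this slot is the single argument of Runtime::kWasmTraceMemory.
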
1495 50 : void GenerateRuntimeCall(Runtime::FunctionId runtime_function, int num_args,
1496 : Register* args) {
1497 : auto call_descriptor = compiler::Linkage::GetRuntimeCallDescriptor(
1498 : compilation_zone_, runtime_function, num_args,
1499 50 : compiler::Operator::kNoProperties, compiler::CallDescriptor::kNoFlags);
1500 : // Currently, only one argument is supported. More arguments require some
1501 : // caution for the parallel register moves (reuse StackTransferRecipe).
1502 : DCHECK_EQ(1, num_args);
1503 : constexpr size_t kInputShift = 1; // Input 0 is the call target.
1504 : compiler::LinkageLocation param_loc =
1505 50 : call_descriptor->GetInputLocation(kInputShift);
1506 50 : if (param_loc.IsRegister()) {
1507 : Register reg = Register::from_code(param_loc.AsRegister());
1508 : __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]),
1509 0 : LiftoffAssembler::kWasmIntPtr);
1510 : } else {
1511 : DCHECK(param_loc.IsCallerFrameSlot());
1512 50 : LiftoffStackSlots stack_slots(&asm_);
1513 : stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
1514 50 : LiftoffRegister(args[0])));
1515 50 : stack_slots.Construct();
1516 : }
1517 :
1518 : // Set context to "no context" for the runtime call.
1519 : __ TurboAssembler::Move(kContextRegister,
1520 50 : Smi::FromInt(Context::kNoContext));
1521 50 : Register centry = kJavaScriptCallCodeStartRegister;
1522 50 : LOAD_TAGGED_PTR_INSTANCE_FIELD(centry, CEntryStub);
1523 50 : __ CallRuntimeWithCEntry(runtime_function, centry);
1524 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1525 50 : Safepoint::kNoLazyDeopt);
1526 50 : }
1527 :
1528 236852 : Register AddMemoryMasking(Register index, uint32_t* offset,
1529 : LiftoffRegList& pinned) {
1530 236852 : if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
1531 236852 : return index;
1532 : }
1533 : DEBUG_CODE_COMMENT("Mask memory index");
1534 : // Make sure that we can overwrite {index}.
1535 0 : if (__ cache_state()->is_used(LiftoffRegister(index))) {
1536 : Register old_index = index;
1537 : pinned.clear(LiftoffRegister(old_index));
1538 0 : index = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1539 0 : if (index != old_index) __ Move(index, old_index, kWasmI32);
1540 : }
1541 0 : Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
1542 0 : __ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
1543 0 : __ emit_ptrsize_add(index, index, tmp);
1544 0 : LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
1545 0 : __ emit_ptrsize_and(index, index, tmp);
1546 0 : *offset = 0;
1547 0 : return index;
1548 : }
1549 :
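  // Illustrative sketch (not part of the original source) of what
  // AddMemoryMasking() computes when --untrusted-code-mitigations is on and no
  // trap handler is available: the constant {offset} is folded into the index
  // and the sum is ANDed with the per-instance memory mask, so a speculatively
  // out-of-bounds access is clamped into the memory region. Hypothetical
  // helper, assuming {memory_mask} is the power-of-two-minus-one mask kept in
  // the instance:
  static uintptr_t MaskedMemIndexSketch(uint32_t index, uint32_t* offset,
                                        uintptr_t memory_mask) {
    uintptr_t masked = (uintptr_t{index} + *offset) & memory_mask;
    *offset = 0;  // the offset is now folded into the returned index
    return masked;
  }
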
1550 208258 : void LoadMem(FullDecoder* decoder, LoadType type,
1551 : const MemoryAccessImmediate<validate>& imm,
1552 : const Value& index_val, Value* result) {
1553 : ValueType value_type = type.value_type();
1554 104615 : if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "load"))
1555 967 : return;
1556 104616 : LiftoffRegList pinned;
1557 209231 : Register index = pinned.set(__ PopToRegister()).gp();
1558 209230 : if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned)) {
1559 : return;
1560 : }
1561 103649 : uint32_t offset = imm.offset;
1562 103649 : index = AddMemoryMasking(index, &offset, pinned);
1563 : DEBUG_CODE_COMMENT("Load from memory");
1564 103648 : Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1565 103648 : LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
1566 : RegClass rc = reg_class_for(value_type);
1567 : LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
1568 103649 : uint32_t protected_load_pc = 0;
1569 103649 : __ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
1570 103650 : if (env_->use_trap_handler) {
1571 : AddOutOfLineTrap(decoder->position(),
1572 : WasmCode::kThrowWasmTrapMemOutOfBounds,
1573 207226 : protected_load_pc);
1574 : }
1575 : __ PushRegister(value_type, value);
1576 :
1577 103649 : if (FLAG_trace_wasm_memory) {
1578 : TraceMemoryOperation(false, type.mem_type().representation(), index,
1579 60 : offset, decoder->position());
1580 : }
1581 : }
1582 :
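  // Note (editorial, not part of the original source): once the bounds check
  // and optional masking above have succeeded, the emitted load simply reads
  //   value = Memory[instance->memory_start + index + offset]
  // with {instance->memory_start} fetched via LOAD_INSTANCE_FIELD(MemoryStart)
  // and the access width/sign-extension given by {type}.
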
1583 267330 : void StoreMem(FullDecoder* decoder, StoreType type,
1584 : const MemoryAccessImmediate<validate>& imm,
1585 : const Value& index_val, const Value& value_val) {
1586 : ValueType value_type = type.value_type();
1587 134073 : if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "store"))
1588 837 : return;
1589 134074 : LiftoffRegList pinned;
1590 268148 : LiftoffRegister value = pinned.set(__ PopToRegister());
1591 134074 : Register index = pinned.set(__ PopToRegister(pinned)).gp();
1592 268154 : if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned)) {
1593 : return;
1594 : }
1595 133240 : uint32_t offset = imm.offset;
1596 133240 : index = AddMemoryMasking(index, &offset, pinned);
1597 : DEBUG_CODE_COMMENT("Store to memory");
1598 133239 : Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1599 133239 : LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
1600 133243 : uint32_t protected_store_pc = 0;
1601 : LiftoffRegList outer_pinned;
1602 133243 : if (FLAG_trace_wasm_memory) outer_pinned.set(index);
1603 : __ Store(addr, index, offset, value, type, outer_pinned,
1604 133243 : &protected_store_pc, true);
1605 133237 : if (env_->use_trap_handler) {
1606 : AddOutOfLineTrap(decoder->position(),
1607 : WasmCode::kThrowWasmTrapMemOutOfBounds,
1608 266474 : protected_store_pc);
1609 : }
1610 133243 : if (FLAG_trace_wasm_memory) {
1611 : TraceMemoryOperation(true, type.mem_rep(), index, offset,
1612 40 : decoder->position());
1613 : }
1614 : }
1615 :
1616 546 : void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
1617 546 : Register mem_size = __ GetUnusedRegister(kGpReg).gp();
1618 546 : LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
1619 : __ emit_ptrsize_shr(mem_size, mem_size, kWasmPageSizeLog2);
1620 : __ PushRegister(kWasmI32, LiftoffRegister(mem_size));
1621 546 : }
1622 :
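  // Note (editorial, not part of the original source): memory.size reports the
  // size in 64 KiB wasm pages, hence the shift by kWasmPageSizeLog2 (== 16)
  // above. E.g. a 1 MiB memory yields 1048576 >> 16 == 16 pages.
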
1623 1456 : void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) {
1624 : // Pop the input, then spill all cache registers to make the runtime call.
1625 : LiftoffRegList pinned;
1626 2912 : LiftoffRegister input = pinned.set(__ PopToRegister());
1627 1456 : __ SpillAllRegisters();
1628 :
1629 : constexpr Register kGpReturnReg = kGpReturnRegisters[0];
1630 : static_assert(kLiftoffAssemblerGpCacheRegs & Register::bit<kGpReturnReg>(),
1631 : "first return register is a cache register (needs more "
1632 : "complex code here otherwise)");
1633 : LiftoffRegister result = pinned.set(LiftoffRegister(kGpReturnReg));
1634 :
1635 : WasmMemoryGrowDescriptor descriptor;
1636 : DCHECK_EQ(0, descriptor.GetStackParameterCount());
1637 : DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
1638 : DCHECK_EQ(ValueTypes::MachineTypeFor(kWasmI32),
1639 : descriptor.GetParameterType(0));
1640 :
1641 : Register param_reg = descriptor.GetRegisterParameter(0);
1642 1456 : if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
1643 :
1644 : __ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
1645 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1646 1456 : Safepoint::kNoLazyDeopt);
1647 :
1648 : if (kReturnRegister0 != result.gp()) {
1649 : __ Move(result.gp(), kReturnRegister0, kWasmI32);
1650 : }
1651 :
1652 : __ PushRegister(kWasmI32, result);
1653 1456 : }
1654 :
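  // Note (editorial, not part of the original source): the WasmMemoryGrow stub
  // called above implements the wasm memory.grow semantics: it returns the old
  // size in 64 KiB pages on success and -1 (as an i32) on failure, which is
  // exactly the value pushed as the result here. E.g. growing a 16-page memory
  // by 4 pages pushes 16 and leaves a 20-page memory behind.
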
1655 274559 : void CallDirect(FullDecoder* decoder,
1656 : const CallFunctionImmediate<validate>& imm,
1657 : const Value args[], Value returns[]) {
1658 266950 : if (imm.sig->return_count() > 1)
1659 : return unsupported(decoder, "multi-return");
1660 266264 : if (imm.sig->return_count() == 1 &&
1661 : !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
1662 129326 : "return"))
1663 : return;
1664 :
1665 : auto call_descriptor =
1666 136942 : compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
1667 : call_descriptor =
1668 : GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
1669 :
1670 136944 : if (imm.index < env_->module->num_imported_functions) {
1671 : // A direct call to an imported function.
1672 : LiftoffRegList pinned;
1673 127092 : Register tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1674 127092 : Register target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1675 :
1676 127092 : Register imported_targets = tmp;
1677 127092 : LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
1678 : kSystemPointerSize);
1679 : __ Load(LiftoffRegister(target), imported_targets, no_reg,
1680 254184 : imm.index * sizeof(Address), kPointerLoadType, pinned);
1681 :
1682 127092 : Register imported_function_refs = tmp;
1683 127092 : LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs,
1684 : ImportedFunctionRefs);
1685 127092 : Register imported_function_ref = tmp;
1686 : __ LoadTaggedPointer(
1687 : imported_function_ref, imported_function_refs, no_reg,
1688 254184 : ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
1689 :
1690 : Register* explicit_instance = &imported_function_ref;
1691 127092 : __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
1692 : source_position_table_builder_.AddPosition(
1693 264027 : __ pc_offset(), SourcePosition(decoder->position()), false);
1694 :
1695 127092 : __ CallIndirect(imm.sig, call_descriptor, target);
1696 :
1697 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1698 127092 : Safepoint::kNoLazyDeopt);
1699 :
1700 127092 : __ FinishCall(imm.sig, call_descriptor);
1701 : } else {
1702 : // A direct call within this module just gets the current instance.
1703 9852 : __ PrepareCall(imm.sig, call_descriptor);
1704 :
1705 : source_position_table_builder_.AddPosition(
1706 9843 : __ pc_offset(), SourcePosition(decoder->position()), false);
1707 :
1708 : // Just encode the function index. This will be patched at instantiation.
1709 9864 : Address addr = static_cast<Address>(imm.index);
1710 : __ CallNativeWasmCode(addr);
1711 :
1712 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1713 9846 : Safepoint::kNoLazyDeopt);
1714 :
1715 9855 : __ FinishCall(imm.sig, call_descriptor);
1716 : }
1717 : }
1718 :
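  // Illustrative summary (editorial, not part of the original source) of the
  // target selection in CallDirect() above, as comment-level pseudocode:
  //   if (imm.index < module->num_imported_functions) {
  //     target = ImportedFunctionTargets[imm.index];  // raw Address
  //     ref    = ImportedFunctionRefs[imm.index];     // callee's instance
  //     CallIndirect(sig, descriptor, target);        // with {ref} as instance
  //   } else {
  //     CallNativeWasmCode(Address{imm.index});  // patched at instantiation
  //   }
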
1719 12701 : void CallIndirect(FullDecoder* decoder, const Value& index_val,
1720 : const CallIndirectImmediate<validate>& imm,
1721 : const Value args[], Value returns[]) {
1722 4941 : if (imm.sig->return_count() > 1) {
1723 0 : return unsupported(decoder, "multi-return");
1724 : }
1725 4942 : if (imm.sig->return_count() == 1 &&
1726 : !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
1727 1766 : "return")) {
1728 : return;
1729 : }
1730 :
1731 : // Pop the index.
1732 6350 : Register index = __ PopToRegister().gp();
1733             : // If that register is still in use after popping, move the value to a fresh
1734             : // register, because {index} is modified below.
1735 3173 : if (__ cache_state()->is_used(LiftoffRegister(index))) {
1736 : Register new_index =
1737 : __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index)).gp();
1738 666 : __ Move(new_index, index, kWasmI32);
1739 : index = new_index;
1740 : }
1741 :
1742 : LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
1743 : // Get three temporary registers.
1744 : Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1745 3175 : Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1746 : Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1747 :
1748 : // Bounds check against the table size.
1749 : Label* invalid_func_label = AddOutOfLineTrap(
1750 3175 : decoder->position(), WasmCode::kThrowWasmTrapFuncInvalid);
1751 :
1752 6352 : uint32_t canonical_sig_num = env_->module->signature_ids[imm.sig_index];
1753 : DCHECK_GE(canonical_sig_num, 0);
1754 : DCHECK_GE(kMaxInt, canonical_sig_num);
1755 :
1756 : // Compare against table size stored in
1757 : // {instance->indirect_function_table_size}.
1758 3176 : LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size);
1759 : __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
1760 3178 : index, tmp_const);
1761 :
1762             : // Mask the index to protect against speculative side-channel attacks (SSCA).
1763 3175 : if (FLAG_untrusted_code_mitigations) {
1764 : DEBUG_CODE_COMMENT("Mask indirect call index");
1765 : // mask = ((index - size) & ~index) >> 31
1766 : // Reuse allocated registers; note: size is still stored in {tmp_const}.
1767 : Register diff = table;
1768 0 : Register neg_index = tmp_const;
1769 : Register mask = scratch;
1770 : // 1) diff = index - size
1771 0 : __ emit_i32_sub(diff, index, tmp_const);
1772 : // 2) neg_index = ~index
1773 0 : __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
1774 : __ emit_i32_xor(neg_index, neg_index, index);
1775 : // 3) mask = diff & neg_index
1776 : __ emit_i32_and(mask, diff, neg_index);
1777 : // 4) mask = mask >> 31
1778 0 : __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(int32_t{31}));
1779 : __ emit_i32_sar(mask, mask, tmp_const, pinned);
1780 :
1781 : // Apply mask.
1782 : __ emit_i32_and(index, index, mask);
1783 : }
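    // Worked example (editorial, not part of the original source) for the
    // mask formula above, with size == 4 and 32-bit arithmetic:
    //   index == 2: diff == -2 (0xFFFFFFFE), ~index == 0xFFFFFFFD,
    //               diff & ~index == 0xFFFFFFFC, >> 31 (arithmetic) == ~0,
    //               so index & mask leaves the in-bounds index unchanged.
    //   index == 7: diff == 3, ~index == 0xFFFFFFF8, diff & ~index == 0,
    //               mask == 0, so a (speculatively) out-of-bounds index is
    //               forced to 0.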
1784 :
1785 : DEBUG_CODE_COMMENT("Check indirect call signature");
1786 : // Load the signature from {instance->ift_sig_ids[key]}
1787 3175 : LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
1788 : __ LoadConstant(LiftoffRegister(tmp_const),
1789 3174 : WasmValue(static_cast<uint32_t>(sizeof(uint32_t))));
1790             : // TODO(wasm): use an emit_i32_shli() instead of a multiply.
1791             : // (We currently cannot use shl on ia32/x64 because the variable shift count clobbers %rcx.)
1792 : __ emit_i32_mul(index, index, tmp_const);
1793 : __ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
1794 3172 : pinned);
1795 :
1796 : // Compare against expected signature.
1797 3176 : __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(canonical_sig_num));
1798 :
1799 : Label* sig_mismatch_label = AddOutOfLineTrap(
1800 3174 : decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
1801 : __ emit_cond_jump(kUnequal, sig_mismatch_label,
1802 3178 : LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
1803 :
1804 : DEBUG_CODE_COMMENT("Execute indirect call");
1805 : if (kSystemPointerSize == 8) {
1806 : // {index} has already been multiplied by 4. Multiply by another 2.
1807 3176 : __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(2));
1808 : __ emit_i32_mul(index, index, tmp_const);
1809 : }
1810 :
1811 : // Load the instance from {instance->ift_instances[key]}
1812 3176 : LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs);
1813             : // {index} has already been multiplied by kSystemPointerSize.
1814 : STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
1815 : __ LoadTaggedPointer(tmp_const, table, index,
1816 : ObjectAccess::ElementOffsetInTaggedFixedArray(0),
1817 3175 : pinned);
1818 : Register* explicit_instance = &tmp_const;
1819 :
1820 : // Load the target from {instance->ift_targets[key]}
1821 3178 : LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
1822 : kSystemPointerSize);
1823 : __ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
1824 3178 : pinned);
1825 :
1826 : source_position_table_builder_.AddPosition(
1827 6354 : __ pc_offset(), SourcePosition(decoder->position()), false);
1828 :
1829 : auto call_descriptor =
1830 3178 : compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
1831 : call_descriptor =
1832 : GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
1833 :
1834 3173 : Register target = scratch;
1835 3173 : __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
1836 3177 : __ CallIndirect(imm.sig, call_descriptor, target);
1837 :
1838 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1839 3176 : Safepoint::kNoLazyDeopt);
1840 :
1841 3178 : __ FinishCall(imm.sig, call_descriptor);
1842 : }
1843 :
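  // Illustrative sketch (not part of the original source) of the indirect-call
  // dispatch sequence emitted above, with hypothetical names and no V8 types.
  // Returning nullptr stands in for taking one of the two out-of-line traps.
  struct IndirectTableSketch {
    uint32_t size;            // instance->indirect_function_table_size
    const uint32_t* sig_ids;  // instance->ift_sig_ids (canonical signature ids)
    void* const* refs;        // instance->ift_instances (callee instance refs)
    void* const* targets;     // instance->ift_targets (raw code entry points)
  };
  static void* IndirectCallTargetSketch(const IndirectTableSketch& table,
                                        uint32_t index,
                                        uint32_t expected_canonical_sig,
                                        void** ref_out) {
    if (index >= table.size) return nullptr;  // kThrowWasmTrapFuncInvalid
    if (table.sig_ids[index] != expected_canonical_sig) {
      return nullptr;                         // kThrowWasmTrapFuncSigMismatch
    }
    *ref_out = table.refs[index];  // passed to the callee as its instance
    return table.targets[index];   // target handed to CallIndirect()
  }
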
1844 0 : void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
1845 : Value* result) {
1846 : unsupported(decoder, "simd");
1847 0 : }
1848 0 : void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
1849 : const SimdLaneImmediate<validate>& imm,
1850 : const Vector<Value> inputs, Value* result) {
1851 : unsupported(decoder, "simd");
1852 0 : }
1853 0 : void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
1854 : const SimdShiftImmediate<validate>& imm, const Value& input,
1855 : Value* result) {
1856 : unsupported(decoder, "simd");
1857 0 : }
1858 0 : void Simd8x16ShuffleOp(FullDecoder* decoder,
1859 : const Simd8x16ShuffleImmediate<validate>& imm,
1860 : const Value& input0, const Value& input1,
1861 : Value* result) {
1862 : unsupported(decoder, "simd");
1863 0 : }
1864 135 : void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
1865 : const Vector<Value>& args) {
1866 : unsupported(decoder, "throw");
1867 135 : }
1868 0 : void Rethrow(FullDecoder* decoder, const Value& exception) {
1869 : unsupported(decoder, "rethrow");
1870 0 : }
1871 0 : void BrOnException(FullDecoder* decoder, const Value& exception,
1872 : const ExceptionIndexImmediate<validate>& imm,
1873 : uint32_t depth, Vector<Value> values) {
1874 : unsupported(decoder, "br_on_exn");
1875 0 : }
1876 15004 : void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
1877 : const MemoryAccessImmediate<validate>& imm, Value* result) {
1878 : unsupported(decoder, "atomicop");
1879 14998 : }
1880 36 : void MemoryInit(FullDecoder* decoder,
1881 : const MemoryInitImmediate<validate>& imm, const Value& dst,
1882 : const Value& src, const Value& size) {
1883 : unsupported(decoder, "memory.init");
1884 36 : }
1885 18 : void MemoryDrop(FullDecoder* decoder,
1886 : const MemoryDropImmediate<validate>& imm) {
1887 : unsupported(decoder, "memory.drop");
1888 18 : }
1889 27 : void MemoryCopy(FullDecoder* decoder,
1890 : const MemoryIndexImmediate<validate>& imm, const Value& dst,
1891 : const Value& src, const Value& size) {
1892 : unsupported(decoder, "memory.copy");
1893 27 : }
1894 27 : void MemoryFill(FullDecoder* decoder,
1895 : const MemoryIndexImmediate<validate>& imm, const Value& dst,
1896 : const Value& value, const Value& size) {
1897 : unsupported(decoder, "memory.fill");
1898 27 : }
1899 9 : void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
1900 : Vector<Value> args) {
1901 : unsupported(decoder, "table.init");
1902 9 : }
1903 18 : void TableDrop(FullDecoder* decoder,
1904 : const TableDropImmediate<validate>& imm) {
1905 : unsupported(decoder, "table.drop");
1906 18 : }
1907 45 : void TableCopy(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
1908 : Vector<Value> args) {
1909 : unsupported(decoder, "table.copy");
1910 45 : }
1911 :
1912 : private:
1913 : LiftoffAssembler asm_;
1914 : compiler::CallDescriptor* const descriptor_;
1915 : CompilationEnv* const env_;
1916 : bool ok_ = true;
1917 : std::vector<OutOfLineCode> out_of_line_code_;
1918 : SourcePositionTableBuilder source_position_table_builder_;
1919 : std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
1920             : // Zone used to store information during compilation. The result will be
1921             : // stored independently, so that this zone can be freed together with the
1922             : // LiftoffCompiler after compilation.
1923 : Zone* compilation_zone_;
1924 : SafepointTableBuilder safepoint_table_builder_;
1925             : // The pc offset of the instructions that reserve the stack frame. Needed to
1926             : // patch in the actually required stack size at the end of compilation.
1927 : uint32_t pc_offset_stack_frame_construction_ = 0;
1928 :
1929 : void TraceCacheState(FullDecoder* decoder) const {
1930 : #ifdef DEBUG
1931 : if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
1932 : StdoutStream os;
1933 : for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
1934 : --control_depth) {
1935 : auto* cache_state =
1936 : control_depth == -1 ? __ cache_state()
1937 : : &decoder->control_at(control_depth)
1938 : ->label_state;
1939 : os << PrintCollection(cache_state->stack_state);
1940 : if (control_depth != -1) PrintF("; ");
1941 : }
1942 : os << "\n";
1943 : #endif
1944 : }
1945 :
1946 : DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
1947 : };
1948 :
1949 : } // namespace
1950 :
1951 711909 : bool LiftoffCompilationUnit::ExecuteCompilation(CompilationEnv* env,
1952 : NativeModule* native_module,
1953 : const FunctionBody& func_body,
1954 : Counters* counters,
1955 : WasmFeatures* detected) {
1956 1423827 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
1957 : "ExecuteLiftoffCompilation");
1958 : base::ElapsedTimer compile_timer;
1959 : if (FLAG_trace_wasm_decode_time) {
1960 : compile_timer.Start();
1961 : }
1962 :
1963 1423878 : Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
1964 711876 : const WasmModule* module = env ? env->module : nullptr;
1965 711876 : auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
1966 : base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
1967 : base::in_place, counters->liftoff_compile_time());
1968 : WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
1969 : &zone, module, env->enabled_features, detected, func_body,
1970 1423842 : call_descriptor, env, &zone);
1971 711835 : decoder.Decode();
1972 : liftoff_compile_time_scope.reset();
1973 687105 : LiftoffCompiler* compiler = &decoder.interface();
1974 711579 : if (decoder.failed()) return false; // validation error
1975 687105 : if (!compiler->ok()) {
1976 : // Liftoff compilation failed.
1977 0 : counters->liftoff_unsupported_functions()->Increment();
1978 0 : return false;
1979 : }
1980 :
1981 687105 : counters->liftoff_compiled_functions()->Increment();
1982 :
1983 : if (FLAG_trace_wasm_decode_time) {
1984 : double compile_ms = compile_timer.Elapsed().InMillisecondsF();
1985 : PrintF(
1986 : "wasm-compilation liftoff phase 1 ok: %u bytes, %0.3f ms decode and "
1987 : "compile\n",
1988 : static_cast<unsigned>(func_body.end - func_body.start), compile_ms);
1989 : }
1990 :
1991 687034 : CodeDesc desc;
1992 : compiler->GetCode(&desc);
1993 : OwnedVector<byte> source_positions = compiler->GetSourcePositionTable();
1994 : OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions =
1995 : compiler->GetProtectedInstructions();
1996 : uint32_t frame_slot_count = compiler->GetTotalFrameSlotCount();
1997 : int safepoint_table_offset = compiler->GetSafepointTableOffset();
1998 :
1999 : WasmCode* code = native_module->AddCode(
2000 : wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
2001 : 0, std::move(protected_instructions), std::move(source_positions),
2002 2061987 : WasmCode::kFunction, WasmCode::kLiftoff);
2003 687421 : wasm_unit_->SetResult(code, counters);
2004 :
2005 711970 : return true;
2006 : }
2007 :
2008 : #undef __
2009 : #undef TRACE
2010 : #undef WASM_INSTANCE_OBJECT_FIELD_OFFSET
2011 : #undef WASM_INSTANCE_OBJECT_FIELD_SIZE
2012 : #undef LOAD_INSTANCE_FIELD
2013 : #undef LOAD_TAGGED_PTR_INSTANCE_FIELD
2014 : #undef DEBUG_CODE_COMMENT
2015 :
2016 : } // namespace wasm
2017 : } // namespace internal
2018 183867 : } // namespace v8