Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/wasm/baseline/liftoff-compiler.h"
6 :
7 : #include "src/assembler-inl.h"
8 : #include "src/base/optional.h"
9 : // TODO(clemensh): Remove dependencies on compiler stuff.
10 : #include "src/compiler/linkage.h"
11 : #include "src/compiler/wasm-compiler.h"
12 : #include "src/counters.h"
13 : #include "src/interface-descriptors.h"
14 : #include "src/log.h"
15 : #include "src/macro-assembler-inl.h"
16 : #include "src/objects/smi.h"
17 : #include "src/ostreams.h"
18 : #include "src/tracing/trace-event.h"
19 : #include "src/utils.h"
20 : #include "src/wasm/baseline/liftoff-assembler.h"
21 : #include "src/wasm/function-body-decoder-impl.h"
22 : #include "src/wasm/function-compiler.h"
23 : #include "src/wasm/memory-tracing.h"
24 : #include "src/wasm/object-access.h"
25 : #include "src/wasm/wasm-engine.h"
26 : #include "src/wasm/wasm-linkage.h"
27 : #include "src/wasm/wasm-objects.h"
28 : #include "src/wasm/wasm-opcodes.h"
29 :
30 : namespace v8 {
31 : namespace internal {
32 : namespace wasm {
33 :
34 : constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
35 : constexpr auto kIntConst = LiftoffAssembler::VarState::kIntConst;
36 : constexpr auto kStack = LiftoffAssembler::VarState::kStack;
37 :
38 : namespace {
39 :
40 : #define __ asm_.
41 :
42 : #define TRACE(...) \
43 : do { \
44 : if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
45 : } while (false)
46 :
47 : #define WASM_INSTANCE_OBJECT_FIELD_OFFSET(name) \
48 : ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
49 :
50 : template <int expected_size, int actual_size>
51 : struct assert_field_size {
52 : static_assert(expected_size == actual_size,
53 : "field in WasmInstance does not have the expected size");
54 : static constexpr int size = actual_size;
55 : };
56 :
57 : #define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
58 : FIELD_SIZE(WasmInstanceObject::k##name##Offset)
59 :
60 : #define LOAD_INSTANCE_FIELD(dst, name, load_size) \
61 : __ LoadFromInstance(dst, WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
62 : assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \
63 : load_size>::size);
64 :
65 : #define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name) \
66 : static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
67 : "field in WasmInstance does not have the expected size"); \
68 : __ LoadTaggedPointerFromInstance(dst, \
69 : WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
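// Usage sketch (an illustration, not part of the original file; assumes a
// 64-bit target, i.e. kSystemPointerSize == 8): the macro call
//   LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize)
// used in StackCheck() below expands to
//   __ LoadFromInstance(
//       limit_address,
//       ObjectAccess::ToTagged(WasmInstanceObject::kStackLimitAddressOffset),
//       assert_field_size<
//           FIELD_SIZE(WasmInstanceObject::kStackLimitAddressOffset),
//           kSystemPointerSize>::size);
// so a mismatch between the declared field size and the requested load size
// fails the static_assert at compile time.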
70 :
71 : #ifdef DEBUG
72 : #define DEBUG_CODE_COMMENT(str) \
73 : do { \
74 : __ RecordComment(str); \
75 : } while (false)
76 : #else
77 : #define DEBUG_CODE_COMMENT(str) ((void)0)
78 : #endif
79 :
80 : constexpr LoadType::LoadTypeValue kPointerLoadType =
81 : kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
82 :
83 : #if V8_TARGET_ARCH_ARM64
84 : // On ARM64, the Assembler keeps track of pointers to Labels to resolve
85 : // branches to distant targets. Moving labels would confuse the Assembler,
86 : // so we store the label on the heap and keep a unique_ptr to it.
87 : class MovableLabel {
88 : public:
89 : MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
90 : MovableLabel() : label_(new Label()) {}
91 :
92 : Label* get() { return label_.get(); }
93 :
94 : private:
95 : std::unique_ptr<Label> label_;
96 : };
97 : #else
98 : // On all other platforms, just store the Label directly.
99 : class MovableLabel {
100 : public:
101 : MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);
102 :
103 1905418 : Label* get() { return &label_; }
104 :
105 : private:
106 : Label label_;
107 : };
108 : #endif
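// Illustrative sketch (not in the original file): {OutOfLineCode} entries
// below hold MovableLabels and live in a growable vector, whose elements
// move on reallocation.
//   std::vector<MovableLabel> labels;
//   labels.emplace_back();
//   Label* p = labels.back().get();
//   labels.emplace_back();  // may reallocate and move all elements
// On ARM64, {p} still points at the same heap-allocated Label afterwards;
// on the other platforms the Label is stored inline, which those Assemblers
// tolerate.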
109 :
110 : compiler::CallDescriptor* GetLoweredCallDescriptor(
111 : Zone* zone, compiler::CallDescriptor* call_desc) {
112 : return kSystemPointerSize == 4
113 : ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
114 : : call_desc;
115 : }
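// Sketch of what "lowering" means here (assumption: on a 32-bit target an
// i64 value does not fit in one register): each i64 parameter or return in
// {call_desc} is split into a pair of i32 slots. A wasm signature such as
// (i64, f32) -> i32 thus gets two i32 input slots for its first parameter,
// which is why ProcessParameter() below can consume two lowered inputs for
// one wasm-level parameter and recombine them via LiftoffRegister::ForPair.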
116 :
117 : constexpr ValueType kSupportedTypesArr[] = {kWasmI32, kWasmI64, kWasmF32,
118 : kWasmF64};
119 : constexpr Vector<const ValueType> kSupportedTypes =
120 : ArrayVector(kSupportedTypesArr);
121 :
122 1189047 : class LiftoffCompiler {
123 : public:
124 : // TODO(clemensh): Make this a template parameter.
125 : static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
126 :
127 : using Value = ValueBase;
128 :
129 3221 : struct ElseState {
130 : MovableLabel label;
131 : LiftoffAssembler::CacheState state;
132 : };
133 :
134 1775247 : struct Control : public ControlBase<Value> {
135 : std::unique_ptr<ElseState> else_state;
136 : LiftoffAssembler::CacheState label_state;
137 : MovableLabel label;
138 :
139 303462 : MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
140 :
141 : template <typename... Args>
142 : explicit Control(Args&&... args) V8_NOEXCEPT
143 : : ControlBase(std::forward<Args>(args)...) {}
144 : };
145 :
146 : using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
147 :
148 : struct OutOfLineCode {
149 : MovableLabel label;
150 : MovableLabel continuation;
151 : WasmCode::RuntimeStubId stub;
152 : WasmCodePosition position;
153 : LiftoffRegList regs_to_save;
154 : uint32_t pc; // for trap handler.
155 :
156 : // Named constructors:
157 : static OutOfLineCode Trap(WasmCode::RuntimeStubId s, WasmCodePosition pos,
158 : uint32_t pc) {
159 : DCHECK_LT(0, pos);
160 385766 : return {{}, {}, s, pos, {}, pc};
161 : }
162 : static OutOfLineCode StackCheck(WasmCodePosition pos, LiftoffRegList regs) {
163 233935 : return {{}, {}, WasmCode::kWasmStackGuard, pos, regs, 0};
164 : }
165 : };
166 :
167 594724 : LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
168 : CompilationEnv* env, Zone* compilation_zone,
169 : std::unique_ptr<AssemblerBuffer> buffer)
170 : : asm_(std::move(buffer)),
171 : descriptor_(
172 : GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
173 : env_(env),
174 : compilation_zone_(compilation_zone),
175 2973316 : safepoint_table_builder_(compilation_zone_) {}
176 :
177 : bool ok() const { return ok_; }
178 :
179 : void GetCode(CodeDesc* desc) {
180 : asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
181 568335 : Assembler::kNoHandlerTable);
182 : }
183 :
184 : OwnedVector<uint8_t> GetSourcePositionTable() {
185 567867 : return source_position_table_builder_.ToSourcePositionTableVector();
186 : }
187 :
188 : OwnedVector<trap_handler::ProtectedInstructionData> GetProtectedInstructions()
189 : const {
190 : return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
191 568433 : protected_instructions_);
192 : }
193 :
194 : uint32_t GetTotalFrameSlotCount() const {
195 : return __ GetTotalFrameSlotCount();
196 : }
197 :
198 : void unsupported(FullDecoder* decoder, const char* reason) {
199 17905 : ok_ = false;
200 : TRACE("unsupported: %s\n", reason);
201 2266 : decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
202 17905 : reason);
203 : UnuseLabels(decoder);
204 : }
205 :
206 1727707 : bool DidAssemblerBailout(FullDecoder* decoder) {
207 1727707 : if (decoder->failed() || !__ did_bailout()) return false;
208 : unsupported(decoder, __ bailout_reason());
209 0 : return true;
210 : }
211 :
212 563908 : bool CheckSupportedType(FullDecoder* decoder,
213 : Vector<const ValueType> supported_types,
214 : ValueType type, const char* context) {
215 : char buffer[128];
216 : // Check supported types.
217 1432130 : for (ValueType supported : supported_types) {
218 997308 : if (type == supported) return true;
219 : }
220 711 : SNPrintF(ArrayVector(buffer), "%s %s", ValueTypes::TypeName(type), context);
221 : unsupported(decoder, buffer);
222 709 : return false;
223 : }
224 :
225 : int GetSafepointTableOffset() const {
226 : return safepoint_table_builder_.GetCodeOffset();
227 : }
228 :
229 : void UnuseLabels(FullDecoder* decoder) {
230 : #ifdef DEBUG
231 : auto Unuse = [](Label* label) {
232 : label->Unuse();
233 : label->UnuseNear();
234 : };
235 : // Unuse all labels now; otherwise their destructors will fire a DCHECK
236 : // error if they were referenced before.
237 : uint32_t control_depth = decoder ? decoder->control_depth() : 0;
238 : for (uint32_t i = 0; i < control_depth; ++i) {
239 : Control* c = decoder->control_at(i);
240 : Unuse(c->label.get());
241 : if (c->else_state) Unuse(c->else_state->label.get());
242 : }
243 : for (auto& ool : out_of_line_code_) Unuse(ool.label.get());
244 : #endif
245 : }
246 :
247 594063 : void StartFunction(FullDecoder* decoder) {
248 594063 : int num_locals = decoder->num_locals();
249 594063 : __ set_num_locals(num_locals);
250 1092530 : for (int i = 0; i < num_locals; ++i) {
251 249162 : __ set_local_type(i, decoder->GetLocalType(i));
252 : }
253 594206 : }
254 :
255 : // Returns the number of inputs processed (1 or 2).
256 166708 : uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
257 : const int num_lowered_params = 1 + needs_reg_pair(type);
258 : ValueType lowered_type = needs_reg_pair(type) ? kWasmI32 : type;
259 : RegClass rc = reg_class_for(lowered_type);
260 : // Initialize to anything, will be set in the loop and used afterwards.
261 : LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
262 : LiftoffRegList pinned;
263 499898 : for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
264 : compiler::LinkageLocation param_loc =
265 166687 : descriptor_->GetInputLocation(input_idx + pair_idx);
266 : // Initialize to anything, will be set in both arms of the if.
267 : LiftoffRegister in_reg = kGpCacheRegList.GetFirstRegSet();
268 166687 : if (param_loc.IsRegister()) {
269 : DCHECK(!param_loc.IsAnyRegister());
270 : int reg_code = param_loc.AsRegister();
271 : #if V8_TARGET_ARCH_ARM
272 : // Liftoff assumes a one-to-one mapping between float registers and
273 : // double registers, and so does not distinguish between f32 and f64
274 : // registers. The f32 register code must therefore be halved in order to
275 : // pass the f64 code to Liftoff.
276 : DCHECK_IMPLIES(type == kWasmF32, (reg_code % 2) == 0);
277 : if (type == kWasmF32) {
278 : reg_code /= 2;
279 : }
280 : #endif
281 : RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
282 135391 : : kLiftoffAssemblerFpCacheRegs;
283 135391 : if (cache_regs & (1ULL << reg_code)) {
284 : // This is a cache register, just use it.
285 135391 : in_reg = LiftoffRegister::from_code(rc, reg_code);
286 : } else {
287 : // Move to a cache register (spill one if necessary).
288 : // Note that we cannot create a {LiftoffRegister} for reg_code, since
289 : // {LiftoffRegister} can only store cache regs.
290 0 : in_reg = __ GetUnusedRegister(rc, pinned);
291 0 : if (rc == kGpReg) {
292 0 : __ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type);
293 : } else {
294 0 : __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code),
295 0 : lowered_type);
296 : }
297 : }
298 31296 : } else if (param_loc.IsCallerFrameSlot()) {
299 31296 : in_reg = __ GetUnusedRegister(rc, pinned);
300 62592 : __ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
301 31296 : lowered_type);
302 : }
303 : reg = pair_idx == 0 ? in_reg
304 166595 : : LiftoffRegister::ForPair(reg.gp(), in_reg.gp());
305 : pinned.set(reg);
306 : }
307 : __ PushRegister(type, reg);
308 166679 : return num_lowered_params;
309 : }
310 :
311 595643 : void StackCheck(WasmCodePosition position) {
312 957351 : if (FLAG_wasm_no_stack_checks || !env_->runtime_exception_support) return;
313 233935 : out_of_line_code_.push_back(
314 235699 : OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
315 : OutOfLineCode& ool = out_of_line_code_.back();
316 469741 : Register limit_address = __ GetUnusedRegister(kGpReg).gp();
317 234042 : LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
318 235152 : __ StackCheck(ool.label.get(), limit_address);
319 234005 : __ bind(ool.continuation.get());
320 : }
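// Shape of the emitted code, as a sketch (the exact instructions are
// platform-specific):
//   <load StackLimitAddress from the instance into limit_address>
//   <compare sp against the limit; branch to ool.label if exceeded>
//   continuation:
//     <function body continues>
// The out-of-line part (GenerateOutOfLineCode below) saves {regs_to_save},
// calls the WasmStackGuard runtime stub, restores the registers, and jumps
// back to {continuation}.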
321 :
322 593707 : void StartFunctionBody(FullDecoder* decoder, Control* block) {
323 1087639 : for (uint32_t i = 0; i < __ num_locals(); ++i) {
324 247344 : if (!CheckSupportedType(decoder, kSupportedTypes, __ local_type(i),
325 : "param"))
326 470 : return;
327 : }
328 :
329 : // Input 0 is the call target, the instance is at 1.
330 : constexpr int kInstanceParameterIndex = 1;
331 : // Store the instance parameter to a special stack slot.
332 : compiler::LinkageLocation instance_loc =
333 593329 : descriptor_->GetInputLocation(kInstanceParameterIndex);
334 : DCHECK(instance_loc.IsRegister());
335 : DCHECK(!instance_loc.IsAnyRegister());
336 593329 : Register instance_reg = Register::from_code(instance_loc.AsRegister());
337 : DCHECK_EQ(kWasmInstanceRegister, instance_reg);
338 :
339 : // Parameter 0 is the instance parameter.
340 : uint32_t num_params =
341 593329 : static_cast<uint32_t>(decoder->sig_->parameter_count());
342 :
343 593329 : __ EnterFrame(StackFrame::WASM_COMPILED);
344 : __ set_has_frame(true);
345 593009 : pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
346 : // {PrepareStackFrame} is the first platform-specific assembler method.
347 : // If this failed, we can bail out immediately, avoiding runtime overhead
348 : // and potential failures because of other unimplemented methods.
349 : // A platform implementing {PrepareStackFrame} must ensure that we can
350 : // finish compilation without errors even if we hit unimplemented
351 : // LiftoffAssembler methods.
352 593009 : if (DidAssemblerBailout(decoder)) return;
353 :
354 593144 : __ SpillInstance(instance_reg);
355 : // Input 0 is the code target, 1 is the instance. First parameter at 2.
356 : uint32_t input_idx = kInstanceParameterIndex + 1;
357 926645 : for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
358 166635 : input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
359 : }
360 : DCHECK_EQ(input_idx, descriptor_->InputCount());
361 : // Set to a gp register, to mark this as uninitialized.
362 : LiftoffRegister zero_double_reg = kGpCacheRegList.GetFirstRegSet();
363 : DCHECK(zero_double_reg.is_gp());
364 753491 : for (uint32_t param_idx = num_params; param_idx < __ num_locals();
365 : ++param_idx) {
366 : ValueType type = decoder->GetLocalType(param_idx);
367 80090 : switch (type) {
368 : case kWasmI32:
369 37752 : __ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
370 : break;
371 : case kWasmI64:
372 41026 : __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0});
373 : break;
374 : case kWasmF32:
375 : case kWasmF64:
376 1312 : if (zero_double_reg.is_gp()) {
377 : // Note: This might spill one of the registers used to hold
378 : // parameters.
379 : zero_double_reg = __ GetUnusedRegister(kFpReg);
380 : // Zero is represented by the bit pattern 0 for both f32 and f64.
381 748 : __ LoadConstant(zero_double_reg, WasmValue(0.));
382 : }
383 : __ PushRegister(type, zero_double_reg);
384 : break;
385 : default:
386 0 : UNIMPLEMENTED();
387 : }
388 : }
389 :
390 : // The function-prologue stack check is associated with position 0, which
391 : // is never a position of any instruction in the function.
392 593317 : StackCheck(0);
393 :
394 : DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
395 : }
396 :
397 567523 : void GenerateOutOfLineCode(OutOfLineCode& ool) {
398 567523 : __ bind(ool.label.get());
399 567680 : const bool is_stack_check = ool.stub == WasmCode::kWasmStackGuard;
400 : const bool is_mem_out_of_bounds =
401 : ool.stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
402 :
403 567680 : if (is_mem_out_of_bounds && env_->use_trap_handler) {
404 172017 : uint32_t pc = static_cast<uint32_t>(__ pc_offset());
405 : DCHECK_EQ(pc, __ pc_offset());
406 344087 : protected_instructions_.emplace_back(
407 344034 : trap_handler::ProtectedInstructionData{ool.pc, pc});
408 : }
409 :
410 567733 : if (!env_->runtime_exception_support) {
411 : // We cannot test calls to the runtime in cctest/test-run-wasm.
412 : // Therefore we emit a call to C here instead of a call to the runtime.
413 : // In this mode, we never generate stack checks.
414 : DCHECK(!is_stack_check);
415 217640 : __ CallTrapCallbackForTesting();
416 217640 : __ LeaveFrame(StackFrame::WASM_COMPILED);
417 217640 : __ DropStackSlotsAndRet(
418 217640 : static_cast<uint32_t>(descriptor_->StackParameterCount()));
419 : return;
420 : }
421 :
422 350093 : if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
423 :
424 1051996 : source_position_table_builder_.AddPosition(
425 350219 : __ pc_offset(), SourcePosition(ool.position), false);
426 351558 : __ CallRuntimeStub(ool.stub);
427 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
428 349936 : Safepoint::kNoLazyDeopt);
429 : DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
430 350804 : if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
431 350846 : if (is_stack_check) {
432 : __ emit_jump(ool.continuation.get());
433 : } else {
434 : __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
435 : }
436 : }
437 :
438 568158 : void FinishFunction(FullDecoder* decoder) {
439 568158 : if (DidAssemblerBailout(decoder)) return;
440 1136821 : for (OutOfLineCode& ool : out_of_line_code_) {
441 567603 : GenerateOutOfLineCode(ool);
442 : }
443 569218 : __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
444 569218 : __ GetTotalFrameSlotCount());
445 : __ FinishCode();
446 569573 : safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCount());
447 : __ MaybeEmitOutOfLineConstantPool();
448 : // The previous calls may have also generated a bailout.
449 568914 : DidAssemblerBailout(decoder);
450 : }
451 :
452 : void OnFirstError(FullDecoder* decoder) {
453 50857 : ok_ = false;
454 : UnuseLabels(decoder);
455 : asm_.AbortCompilation();
456 : }
457 :
458 : void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
459 : TraceCacheState(decoder);
460 : SLOW_DCHECK(__ ValidateCacheState());
461 : DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
462 : }
463 :
464 : void Block(FullDecoder* decoder, Control* block) {}
465 :
466 2329 : void Loop(FullDecoder* decoder, Control* loop) {
467 : // Before entering a loop, spill all locals to the stack, in order to free
468 : // the cache registers, and to avoid unnecessarily reloading stack values
469 : // into registers at branches.
470 : // TODO(clemensh): Come up with a better strategy here, involving
471 : // pre-analysis of the function.
472 2329 : __ SpillLocals();
473 :
474 : // Loop labels bind at the beginning of the block.
475 2329 : __ bind(loop->label.get());
476 :
477 : // Save the current cache state for the merge when jumping to this loop.
478 2330 : loop->label_state.Split(*__ cache_state());
479 :
480 : // Execute a stack check in the loop header.
481 2328 : StackCheck(decoder->position());
482 2329 : }
483 :
484 : void Try(FullDecoder* decoder, Control* block) {
485 : unsupported(decoder, "try");
486 : }
487 :
488 : void Catch(FullDecoder* decoder, Control* block, Value* exception) {
489 : unsupported(decoder, "catch");
490 : }
491 :
492 3247 : void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
493 : DCHECK_EQ(if_block, decoder->control_at(0));
494 : DCHECK(if_block->is_if());
495 :
496 3247 : if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
497 28 : return unsupported(decoder, "multi-value if");
498 :
499 : // Allocate the else state.
500 6450 : if_block->else_state = base::make_unique<ElseState>();
501 :
502 : // Test the condition, jump to else if zero.
503 6457 : Register value = __ PopToRegister().gp();
504 : __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
505 3225 : value);
506 :
507 : // Store the state (after popping the value) for executing the else branch.
508 3223 : if_block->else_state->state.Split(*__ cache_state());
509 : }
510 :
511 0 : void FallThruTo(FullDecoder* decoder, Control* c) {
512 0 : if (c->end_merge.reached) {
513 0 : __ MergeFullStackWith(c->label_state, *__ cache_state());
514 : } else {
515 0 : c->label_state.Split(*__ cache_state());
516 : }
517 : TraceCacheState(decoder);
518 0 : }
519 :
520 1120 : void FinishOneArmedIf(FullDecoder* decoder, Control* c) {
521 : DCHECK(c->is_onearmed_if());
522 1120 : if (c->end_merge.reached) {
523 : // Someone already merged to the end of the if. Merge both arms into that.
524 52 : if (c->reachable()) {
525 : // Merge the if state into the end state.
526 0 : __ MergeFullStackWith(c->label_state, *__ cache_state());
527 : __ emit_jump(c->label.get());
528 : }
529 : // Merge the else state into the end state.
530 52 : __ bind(c->else_state->label.get());
531 52 : __ MergeFullStackWith(c->label_state, c->else_state->state);
532 52 : __ cache_state()->Steal(c->label_state);
533 1068 : } else if (c->reachable()) {
534 : // No merge yet at the end of the if, but we need to create a merge for
535 : // both arms of this if. Thus initialize the merge point from the else
536 : // state, then merge the if state into that.
537 : DCHECK_EQ(0, c->end_merge.arity);
538 1182 : c->label_state.InitMerge(c->else_state->state, __ num_locals(), 0,
539 591 : c->stack_depth);
540 590 : __ MergeFullStackWith(c->label_state, *__ cache_state());
541 : __ emit_jump(c->label.get());
542 : // Merge the else state into the end state.
543 590 : __ bind(c->else_state->label.get());
544 589 : __ MergeFullStackWith(c->label_state, c->else_state->state);
545 591 : __ cache_state()->Steal(c->label_state);
546 : } else {
547 : // No merge needed, just continue with the else state.
548 477 : __ bind(c->else_state->label.get());
549 954 : __ cache_state()->Steal(c->else_state->state);
550 : }
551 1120 : }
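// Emitted layout for the middle case above (reachable if-body, no prior
// end-merge), as a sketch:
//   <then-body falls through>
//   <merge current cache state into label_state; jmp end>
//   else_label:
//   <merge else_state into label_state>
//   end:  // bound in PopControl; continue with label_state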
552 :
553 138468 : void PopControl(FullDecoder* decoder, Control* c) {
554 138468 : if (c->is_loop()) return; // A loop just falls through.
555 136531 : if (c->is_onearmed_if()) {
556 : // Special handling for one-armed ifs.
557 1120 : FinishOneArmedIf(decoder, c);
558 135411 : } else if (c->end_merge.reached) {
559 : // There is a merge already. Merge our state into that, then continue with
560 : // that state.
561 121234 : if (c->reachable()) {
562 2297 : __ MergeFullStackWith(c->label_state, *__ cache_state());
563 : }
564 242462 : __ cache_state()->Steal(c->label_state);
565 : } else {
566 : // No merge, just continue with our current state.
567 : }
568 :
569 273064 : if (!c->label.get()->is_bound()) __ bind(c->label.get());
570 : }
571 :
572 : void EndControl(FullDecoder* decoder, Control* c) {}
573 :
574 : enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
575 :
576 26952 : void GenerateCCall(const LiftoffRegister* result_regs, FunctionSig* sig,
577 : ValueType out_argument_type,
578 : const LiftoffRegister* arg_regs,
579 : ExternalReference ext_ref) {
580 : // Before making a call, spill all cache registers.
581 26952 : __ SpillAllRegisters();
582 :
583 : // Store arguments on our stack, then align the stack for calling to C.
584 26951 : int param_bytes = 0;
585 134755 : for (ValueType param_type : sig->parameters()) {
586 53902 : param_bytes += ValueTypes::MemSize(param_type);
587 : }
588 : int out_arg_bytes = out_argument_type == kWasmStmt
589 : ? 0
590 53902 : : ValueTypes::MemSize(out_argument_type);
591 26951 : int stack_bytes = std::max(param_bytes, out_arg_bytes);
592 26951 : __ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes,
593 26951 : ext_ref);
594 26952 : }
595 :
596 : template <ValueType src_type, ValueType result_type, class EmitFn>
597 107612 : void EmitUnOp(EmitFn fn) {
598 : static RegClass src_rc = reg_class_for(src_type);
599 : static RegClass result_rc = reg_class_for(result_type);
600 107612 : LiftoffRegister src = __ PopToRegister();
601 : LiftoffRegister dst = src_rc == result_rc
602 215230 : ? __ GetUnusedRegister(result_rc, {src})
603 215228 : : __ GetUnusedRegister(result_rc);
604 231 : fn(dst, src);
605 : __ PushRegister(result_type, dst);
606 107614 : }
607 :
608 : void EmitI32UnOpWithCFallback(bool (LiftoffAssembler::*emit_fn)(Register,
609 : Register),
610 : ExternalReference (*fallback_fn)()) {
611 76 : auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
612 228 : if (emit_fn && (asm_.*emit_fn)(dst.gp(), src.gp())) return;
613 0 : ExternalReference ext_ref = fallback_fn();
614 0 : ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
615 : FunctionSig sig_i_i(1, 1, sig_i_i_reps);
616 0 : GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src, ext_ref);
617 76 : };
618 76 : EmitUnOp<kWasmI32, kWasmI32>(emit_with_c_fallback);
619 : }
620 :
621 : template <ValueType type>
622 : void EmitFloatUnOpWithCFallback(
623 : bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
624 : ExternalReference (*fallback_fn)()) {
625 153 : auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
626 461 : if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
627 0 : ExternalReference ext_ref = fallback_fn();
628 0 : ValueType sig_reps[] = {type};
629 : FunctionSig sig(0, 1, sig_reps);
630 0 : GenerateCCall(&dst, &sig, type, &src, ext_ref);
631 160 : };
632 160 : EmitUnOp<type, type>(emit_with_c_fallback);
633 : }
634 :
635 : enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
636 : template <ValueType dst_type, ValueType src_type,
637 : TypeConversionTrapping can_trap>
638 118640 : void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
639 : WasmCodePosition trap_position) {
640 : static constexpr RegClass src_rc = reg_class_for(src_type);
641 : static constexpr RegClass dst_rc = reg_class_for(dst_type);
642 118640 : LiftoffRegister src = __ PopToRegister();
643 30268 : LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
644 117437 : : __ GetUnusedRegister(dst_rc);
645 : DCHECK_EQ(!!can_trap, trap_position > 0);
646 : Label* trap = can_trap ? AddOutOfLineTrap(
647 : trap_position,
648 : WasmCode::kThrowWasmTrapFloatUnrepresentable)
649 : : nullptr;
650 118672 : if (!__ emit_type_conversion(opcode, dst, src, trap)) {
651 : DCHECK_NOT_NULL(fallback_fn);
652 0 : ExternalReference ext_ref = fallback_fn();
653 : if (can_trap) {
654 : // External references for potentially trapping conversions return int.
655 0 : ValueType sig_reps[] = {kWasmI32, src_type};
656 : FunctionSig sig(1, 1, sig_reps);
657 : LiftoffRegister ret_reg =
658 : __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
659 0 : LiftoffRegister dst_regs[] = {ret_reg, dst};
660 0 : GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
661 0 : __ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
662 : } else {
663 0 : ValueType sig_reps[] = {src_type};
664 : FunctionSig sig(0, 1, sig_reps);
665 0 : GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
666 : }
667 : }
668 : __ PushRegister(dst_type, dst);
669 118661 : }
670 :
671 226418 : void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
672 : Value* result) {
673 : #define CASE_I32_UNOP(opcode, fn) \
674 : case WasmOpcode::kExpr##opcode: \
675 : EmitUnOp<kWasmI32, kWasmI32>( \
676 : [=](LiftoffRegister dst, LiftoffRegister src) { \
677 : __ emit_##fn(dst.gp(), src.gp()); \
678 : }); \
679 : break;
680 : #define CASE_I32_SIGN_EXTENSION(opcode, fn) \
681 : case WasmOpcode::kExpr##opcode: \
682 : EmitUnOp<kWasmI32, kWasmI32>( \
683 : [=](LiftoffRegister dst, LiftoffRegister src) { \
684 : __ emit_##fn(dst.gp(), src.gp()); \
685 : }); \
686 : break;
687 : #define CASE_I64_SIGN_EXTENSION(opcode, fn) \
688 : case WasmOpcode::kExpr##opcode: \
689 : EmitUnOp<kWasmI64, kWasmI64>( \
690 : [=](LiftoffRegister dst, LiftoffRegister src) { \
691 : __ emit_##fn(dst, src); \
692 : }); \
693 : break;
694 : #define CASE_FLOAT_UNOP(opcode, type, fn) \
695 : case WasmOpcode::kExpr##opcode: \
696 : EmitUnOp<kWasm##type, kWasm##type>( \
697 : [=](LiftoffRegister dst, LiftoffRegister src) { \
698 : __ emit_##fn(dst.fp(), src.fp()); \
699 : }); \
700 : break;
701 : #define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
702 : case WasmOpcode::kExpr##opcode: \
703 : EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \
704 : &ExternalReference::wasm_##fn); \
705 : break;
706 : #define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
707 : case WasmOpcode::kExpr##opcode: \
708 : EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>( \
709 : kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0); \
710 : break;
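// For reference, one expansion: CASE_I32_UNOP(I32Clz, i32_clz) in the switch
// below becomes
//   case WasmOpcode::kExprI32Clz:
//     EmitUnOp<kWasmI32, kWasmI32>(
//         [=](LiftoffRegister dst, LiftoffRegister src) {
//           __ emit_i32_clz(dst.gp(), src.gp());
//         });
//     break;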
711 226418 : switch (opcode) {
712 209823 : CASE_I32_UNOP(I32Eqz, i32_eqz)
713 2288 : CASE_I32_UNOP(I32Clz, i32_clz)
714 671 : CASE_I32_UNOP(I32Ctz, i32_ctz)
715 64 : CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
716 450 : CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
717 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Ceil, F32, f32_ceil)
718 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Floor, F32, f32_floor)
719 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Trunc, F32, f32_trunc)
720 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F32NearestInt, F32, f32_nearest_int)
721 304 : CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
722 64 : CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
723 452 : CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
724 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Ceil, F64, f64_ceil)
725 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Floor, F64, f64_floor)
726 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Trunc, F64, f64_trunc)
727 : CASE_FLOAT_UNOP_WITH_CFALLBACK(F64NearestInt, F64, f64_nearest_int)
728 256 : CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
729 752 : CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap)
730 112 : CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap)
731 44 : CASE_TYPE_CONVERSION(I32UConvertF32, I32, F32, nullptr, kCanTrap)
732 112 : CASE_TYPE_CONVERSION(I32SConvertF64, I32, F64, nullptr, kCanTrap)
733 36 : CASE_TYPE_CONVERSION(I32UConvertF64, I32, F64, nullptr, kCanTrap)
734 50919 : CASE_TYPE_CONVERSION(I32ReinterpretF32, I32, F32, nullptr, kNoTrap)
735 72 : CASE_TYPE_CONVERSION(I64SConvertI32, I64, I32, nullptr, kNoTrap)
736 14086 : CASE_TYPE_CONVERSION(I64UConvertI32, I64, I32, nullptr, kNoTrap)
737 36 : CASE_TYPE_CONVERSION(I64SConvertF32, I64, F32,
738 : &ExternalReference::wasm_float32_to_int64, kCanTrap)
739 36 : CASE_TYPE_CONVERSION(I64UConvertF32, I64, F32,
740 : &ExternalReference::wasm_float32_to_uint64, kCanTrap)
741 788 : CASE_TYPE_CONVERSION(I64SConvertF64, I64, F64,
742 : &ExternalReference::wasm_float64_to_int64, kCanTrap)
743 44 : CASE_TYPE_CONVERSION(I64UConvertF64, I64, F64,
744 : &ExternalReference::wasm_float64_to_uint64, kCanTrap)
745 50607 : CASE_TYPE_CONVERSION(I64ReinterpretF64, I64, F64, nullptr, kNoTrap)
746 72 : CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr, kNoTrap)
747 32 : CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr, kNoTrap)
748 44 : CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64,
749 : &ExternalReference::wasm_int64_to_float32, kNoTrap)
750 28 : CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64,
751 : &ExternalReference::wasm_uint64_to_float32, kNoTrap)
752 78 : CASE_TYPE_CONVERSION(F32ConvertF64, F32, F64, nullptr, kNoTrap)
753 130 : CASE_TYPE_CONVERSION(F32ReinterpretI32, F32, I32, nullptr, kNoTrap)
754 60 : CASE_TYPE_CONVERSION(F64SConvertI32, F64, I32, nullptr, kNoTrap)
755 64 : CASE_TYPE_CONVERSION(F64UConvertI32, F64, I32, nullptr, kNoTrap)
756 56 : CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64,
757 : &ExternalReference::wasm_int64_to_float64, kNoTrap)
758 132 : CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64,
759 : &ExternalReference::wasm_uint64_to_float64, kNoTrap)
760 142 : CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr, kNoTrap)
761 172 : CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr, kNoTrap)
762 24 : CASE_I32_SIGN_EXTENSION(I32SExtendI8, i32_signextend_i8)
763 8 : CASE_I32_SIGN_EXTENSION(I32SExtendI16, i32_signextend_i16)
764 24 : CASE_I64_SIGN_EXTENSION(I64SExtendI8, i64_signextend_i8)
765 8 : CASE_I64_SIGN_EXTENSION(I64SExtendI16, i64_signextend_i16)
766 8 : CASE_I64_SIGN_EXTENSION(I64SExtendI32, i64_signextend_i32)
767 : case kExprI32Popcnt:
768 : EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
769 : &ExternalReference::wasm_word32_popcnt);
770 : break;
771 : case WasmOpcode::kExprI64Eqz:
772 : EmitUnOp<kWasmI64, kWasmI32>(
773 : [=](LiftoffRegister dst, LiftoffRegister src) {
774 162 : __ emit_i64_eqz(dst.gp(), src);
775 163 : });
776 : break;
777 : default:
778 144 : return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
779 : }
780 : #undef CASE_I32_UNOP
781 : #undef CASE_I32_SIGN_EXTENSION
782 : #undef CASE_I64_SIGN_EXTENSION
783 : #undef CASE_FLOAT_UNOP
784 : #undef CASE_FLOAT_UNOP_WITH_CFALLBACK
785 : #undef CASE_TYPE_CONVERSION
786 : }
787 :
788 : template <ValueType src_type, ValueType result_type, typename EmitFn>
789 764974 : void EmitBinOp(EmitFn fn) {
790 : static constexpr RegClass src_rc = reg_class_for(src_type);
791 : static constexpr RegClass result_rc = reg_class_for(result_type);
792 764974 : LiftoffRegister rhs = __ PopToRegister();
793 765046 : LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
794 : LiftoffRegister dst = src_rc == result_rc
795 1527864 : ? __ GetUnusedRegister(result_rc, {lhs, rhs})
796 763932 : : __ GetUnusedRegister(result_rc);
797 137642 : fn(dst, lhs, rhs);
798 : __ PushRegister(result_type, dst);
799 765009 : }
800 :
801 : void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
802 : LiftoffRegister rhs, ExternalReference ext_ref,
803 : Label* trap_by_zero,
804 : Label* trap_unrepresentable = nullptr) {
805 : // Cannot emit native instructions, build C call.
806 : LiftoffRegister ret =
807 : __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
808 : LiftoffRegister tmp =
809 : __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
810 : LiftoffRegister arg_regs[] = {lhs, rhs};
811 : LiftoffRegister result_regs[] = {ret, dst};
812 : ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64};
813 : // <i64, i64> -> i32 (with i64 output argument)
814 : FunctionSig sig(1, 2, sig_types);
815 : GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref);
816 : __ LoadConstant(tmp, WasmValue(int32_t{0}));
817 : __ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp());
818 : if (trap_unrepresentable) {
819 : __ LoadConstant(tmp, WasmValue(int32_t{-1}));
820 : __ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(),
821 : tmp.gp());
822 : }
823 : }
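// Return convention of the C call above: the callee returns an i32 status in
// {ret} and writes the i64 result through the out-argument into {dst}.
// Status 0 signals division by zero and status -1 signals an unrepresentable
// result (e.g. INT64_MIN / -1); any other status means success.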
824 :
825 765230 : void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
826 : const Value& rhs, Value* result) {
827 : #define CASE_I32_BINOP(opcode, fn) \
828 : case WasmOpcode::kExpr##opcode: \
829 : return EmitBinOp<kWasmI32, kWasmI32>( \
830 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
831 : __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
832 : });
833 : #define CASE_I64_BINOP(opcode, fn) \
834 : case WasmOpcode::kExpr##opcode: \
835 : return EmitBinOp<kWasmI64, kWasmI64>( \
836 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
837 : __ emit_##fn(dst, lhs, rhs); \
838 : });
839 : #define CASE_FLOAT_BINOP(opcode, type, fn) \
840 : case WasmOpcode::kExpr##opcode: \
841 : return EmitBinOp<kWasm##type, kWasm##type>( \
842 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
843 : __ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
844 : });
845 : #define CASE_I32_CMPOP(opcode, cond) \
846 : case WasmOpcode::kExpr##opcode: \
847 : return EmitBinOp<kWasmI32, kWasmI32>( \
848 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
849 : __ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
850 : });
851 : #define CASE_I64_CMPOP(opcode, cond) \
852 : case WasmOpcode::kExpr##opcode: \
853 : return EmitBinOp<kWasmI64, kWasmI32>( \
854 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
855 : __ emit_i64_set_cond(cond, dst.gp(), lhs, rhs); \
856 : });
857 : #define CASE_F32_CMPOP(opcode, cond) \
858 : case WasmOpcode::kExpr##opcode: \
859 : return EmitBinOp<kWasmF32, kWasmI32>( \
860 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
861 : __ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
862 : });
863 : #define CASE_F64_CMPOP(opcode, cond) \
864 : case WasmOpcode::kExpr##opcode: \
865 : return EmitBinOp<kWasmF64, kWasmI32>( \
866 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
867 : __ emit_f64_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
868 : });
869 : #define CASE_I32_SHIFTOP(opcode, fn) \
870 : case WasmOpcode::kExpr##opcode: \
871 : return EmitBinOp<kWasmI32, kWasmI32>( \
872 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
873 : __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp(), {}); \
874 : });
875 : #define CASE_I64_SHIFTOP(opcode, fn) \
876 : case WasmOpcode::kExpr##opcode: \
877 : return EmitBinOp<kWasmI64, kWasmI64>([=](LiftoffRegister dst, \
878 : LiftoffRegister src, \
879 : LiftoffRegister amount) { \
880 : __ emit_##fn(dst, src, amount.is_pair() ? amount.low_gp() : amount.gp(), \
881 : {}); \
882 : });
883 : #define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
884 : case WasmOpcode::kExpr##opcode: \
885 : return EmitBinOp<kWasmI32, kWasmI32>( \
886 : [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
887 : LiftoffRegister args[] = {lhs, rhs}; \
888 : auto ext_ref = ExternalReference::ext_ref_fn(); \
889 : ValueType sig_i_ii_reps[] = {kWasmI32, kWasmI32, kWasmI32}; \
890 : FunctionSig sig_i_ii(1, 2, sig_i_ii_reps); \
891 : GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref); \
892 : });
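// For reference, one expansion: CASE_I32_CMPOP(I32Eq, kEqual) in the switch
// below becomes
//   case WasmOpcode::kExprI32Eq:
//     return EmitBinOp<kWasmI32, kWasmI32>(
//         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
//           __ emit_i32_set_cond(kEqual, dst.gp(), lhs.gp(), rhs.gp());
//         });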
893 765230 : switch (opcode) {
894 556196 : CASE_I32_BINOP(I32Add, i32_add)
895 47532 : CASE_I32_BINOP(I32Sub, i32_sub)
896 28893 : CASE_I32_BINOP(I32Mul, i32_mul)
897 42781 : CASE_I32_BINOP(I32And, i32_and)
898 27221 : CASE_I32_BINOP(I32Ior, i32_or)
899 26952 : CASE_I32_BINOP(I32Xor, i32_xor)
900 15347 : CASE_I64_BINOP(I64And, i64_and)
901 14167 : CASE_I64_BINOP(I64Ior, i64_or)
902 104 : CASE_I64_BINOP(I64Xor, i64_xor)
903 167938 : CASE_I32_CMPOP(I32Eq, kEqual)
904 27160 : CASE_I32_CMPOP(I32Ne, kUnequal)
905 27034 : CASE_I32_CMPOP(I32LtS, kSignedLessThan)
906 27112 : CASE_I32_CMPOP(I32LtU, kUnsignedLessThan)
907 26984 : CASE_I32_CMPOP(I32GtS, kSignedGreaterThan)
908 26984 : CASE_I32_CMPOP(I32GtU, kUnsignedGreaterThan)
909 27112 : CASE_I32_CMPOP(I32LeS, kSignedLessEqual)
910 27032 : CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
911 26968 : CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
912 27008 : CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
913 1360 : CASE_I64_BINOP(I64Add, i64_add)
914 1559 : CASE_I64_BINOP(I64Sub, i64_sub)
915 1537 : CASE_I64_BINOP(I64Mul, i64_mul)
916 69182 : CASE_I64_CMPOP(I64Eq, kEqual)
917 56 : CASE_I64_CMPOP(I64Ne, kUnequal)
918 120 : CASE_I64_CMPOP(I64LtS, kSignedLessThan)
919 72 : CASE_I64_CMPOP(I64LtU, kUnsignedLessThan)
920 72 : CASE_I64_CMPOP(I64GtS, kSignedGreaterThan)
921 72 : CASE_I64_CMPOP(I64GtU, kUnsignedGreaterThan)
922 56 : CASE_I64_CMPOP(I64LeS, kSignedLessEqual)
923 88 : CASE_I64_CMPOP(I64LeU, kUnsignedLessEqual)
924 56 : CASE_I64_CMPOP(I64GeS, kSignedGreaterEqual)
925 56 : CASE_I64_CMPOP(I64GeU, kUnsignedGreaterEqual)
926 272 : CASE_F32_CMPOP(F32Eq, kEqual)
927 80 : CASE_F32_CMPOP(F32Ne, kUnequal)
928 224 : CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
929 240 : CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
930 224 : CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
931 208 : CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
932 160 : CASE_F64_CMPOP(F64Eq, kEqual)
933 80 : CASE_F64_CMPOP(F64Ne, kUnequal)
934 240 : CASE_F64_CMPOP(F64Lt, kUnsignedLessThan)
935 192 : CASE_F64_CMPOP(F64Gt, kUnsignedGreaterThan)
936 224 : CASE_F64_CMPOP(F64Le, kUnsignedLessEqual)
937 192 : CASE_F64_CMPOP(F64Ge, kUnsignedGreaterEqual)
938 56396 : CASE_I32_SHIFTOP(I32Shl, i32_shl)
939 56304 : CASE_I32_SHIFTOP(I32ShrS, i32_sar)
940 56304 : CASE_I32_SHIFTOP(I32ShrU, i32_shr)
941 30635 : CASE_I64_SHIFTOP(I64Shl, i64_shl)
942 2528 : CASE_I64_SHIFTOP(I64ShrS, i64_sar)
943 3040 : CASE_I64_SHIFTOP(I64ShrU, i64_shr)
944 40452 : CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
945 40404 : CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
946 888 : CASE_FLOAT_BINOP(F32Add, F32, f32_add)
947 719 : CASE_FLOAT_BINOP(F32Sub, F32, f32_sub)
948 896 : CASE_FLOAT_BINOP(F32Mul, F32, f32_mul)
949 758 : CASE_FLOAT_BINOP(F32Div, F32, f32_div)
950 128 : CASE_FLOAT_BINOP(F32Min, F32, f32_min)
951 128 : CASE_FLOAT_BINOP(F32Max, F32, f32_max)
952 56 : CASE_FLOAT_BINOP(F32CopySign, F32, f32_copysign)
953 1670 : CASE_FLOAT_BINOP(F64Add, F64, f64_add)
954 728 : CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
955 1128 : CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
956 880 : CASE_FLOAT_BINOP(F64Div, F64, f64_div)
957 160 : CASE_FLOAT_BINOP(F64Min, F64, f64_min)
958 128 : CASE_FLOAT_BINOP(F64Max, F64, f64_max)
959 56 : CASE_FLOAT_BINOP(F64CopySign, F64, f64_copysign)
960 : case WasmOpcode::kExprI32DivS:
961 : EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
962 : LiftoffRegister lhs,
963 56768 : LiftoffRegister rhs) {
964 : WasmCodePosition position = decoder->position();
965 : AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
966 : // Adding the second trap might invalidate the pointer returned for
967 : // the first one, thus get both pointers afterwards.
968 : AddOutOfLineTrap(position,
969 : WasmCode::kThrowWasmTrapDivUnrepresentable);
970 : Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
971 : Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
972 14192 : __ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
973 : div_unrepresentable);
974 28384 : });
975 : break;
976 : case WasmOpcode::kExprI32DivU:
977 : EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
978 : LiftoffRegister lhs,
979 42516 : LiftoffRegister rhs) {
980 : Label* div_by_zero = AddOutOfLineTrap(
981 : decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
982 14172 : __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
983 28344 : });
984 : break;
985 : case WasmOpcode::kExprI32RemS:
986 : EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
987 : LiftoffRegister lhs,
988 42324 : LiftoffRegister rhs) {
989 : Label* rem_by_zero = AddOutOfLineTrap(
990 : decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
991 14108 : __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
992 28216 : });
993 : break;
994 : case WasmOpcode::kExprI32RemU:
995 : EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
996 : LiftoffRegister lhs,
997 42312 : LiftoffRegister rhs) {
998 : Label* rem_by_zero = AddOutOfLineTrap(
999 : decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
1000 14104 : __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
1001 28208 : });
1002 : break;
1003 : case WasmOpcode::kExprI64DivS:
1004 : EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
1005 : LiftoffRegister lhs,
1006 2864 : LiftoffRegister rhs) {
1007 : WasmCodePosition position = decoder->position();
1008 : AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
1009 : // Adding the second trap might invalidate the pointer returned for
1010 : // the first one, thus get both pointers afterwards.
1011 : AddOutOfLineTrap(position,
1012 : WasmCode::kThrowWasmTrapDivUnrepresentable);
1013 : Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
1014 : Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
1015 716 : if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
1016 : div_unrepresentable)) {
1017 : ExternalReference ext_ref = ExternalReference::wasm_int64_div();
1018 : EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero,
1019 : div_unrepresentable);
1020 : }
1021 1432 : });
1022 : break;
1023 : case WasmOpcode::kExprI64DivU:
1024 : EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
1025 : LiftoffRegister lhs,
1026 1968 : LiftoffRegister rhs) {
1027 : Label* div_by_zero = AddOutOfLineTrap(
1028 : decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
1029 656 : if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
1030 : ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
1031 : EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
1032 : }
1033 1312 : });
1034 : break;
1035 : case WasmOpcode::kExprI64RemS:
1036 : EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
1037 : LiftoffRegister lhs,
1038 1920 : LiftoffRegister rhs) {
1039 : Label* rem_by_zero = AddOutOfLineTrap(
1040 : decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
1041 640 : if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
1042 : ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
1043 : EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
1044 : }
1045 1280 : });
1046 : break;
1047 : case WasmOpcode::kExprI64RemU:
1048 : EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
1049 : LiftoffRegister lhs,
1050 1920 : LiftoffRegister rhs) {
1051 : Label* rem_by_zero = AddOutOfLineTrap(
1052 : decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
1053 640 : if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
1054 : ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
1055 : EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
1056 : }
1057 1280 : });
1058 : break;
1059 : default:
1060 156 : return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
1061 : }
1062 : #undef CASE_I32_BINOP
1063 : #undef CASE_I64_BINOP
1064 : #undef CASE_FLOAT_BINOP
1065 : #undef CASE_I32_CMPOP
1066 : #undef CASE_I64_CMPOP
1067 : #undef CASE_F32_CMPOP
1068 : #undef CASE_F64_CMPOP
1069 : #undef CASE_I32_SHIFTOP
1070 : #undef CASE_I64_SHIFTOP
1071 : #undef CASE_CCALL_BINOP
1072 : }
1073 :
1074 : void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
1075 1306776 : __ cache_state()->stack_state.emplace_back(kWasmI32, value);
1076 : }
1077 :
1078 42095 : void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
1079 : // The {VarState} stores constant values as int32_t, thus we only store
1080 : // 64-bit constants in this field if they fit in an int32_t. Larger values
1081 : // cannot be used as immediate values anyway, so we can just put them in a
1082 : // register immediately.
1083 42095 : int32_t value_i32 = static_cast<int32_t>(value);
1084 42095 : if (value_i32 == value) {
1085 20683 : __ cache_state()->stack_state.emplace_back(kWasmI64, value_i32);
1086 : } else {
1087 21412 : LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
1088 21412 : __ LoadConstant(reg, WasmValue(value));
1089 : __ PushRegister(kWasmI64, reg);
1090 : }
1091 42097 : }
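// Examples: I64Const(-1) survives the int32_t round-trip and stays a stack
// constant, whereas I64Const(0x100000000) truncates to 0 != value and is
// therefore materialized into a register right away.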
1092 :
1093 118080 : void F32Const(FullDecoder* decoder, Value* result, float value) {
1094 118080 : LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
1095 118083 : __ LoadConstant(reg, WasmValue(value));
1096 : __ PushRegister(kWasmF32, reg);
1097 118084 : }
1098 :
1099 119012 : void F64Const(FullDecoder* decoder, Value* result, double value) {
1100 119012 : LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
1101 119015 : __ LoadConstant(reg, WasmValue(value));
1102 : __ PushRegister(kWasmF64, reg);
1103 119016 : }
1104 :
1105 : void RefNull(FullDecoder* decoder, Value* result) {
1106 : unsupported(decoder, "ref_null");
1107 : }
1108 :
1109 : void Drop(FullDecoder* decoder, const Value& value) {
1110 : auto& slot = __ cache_state()->stack_state.back();
1111 : // If the dropped slot contains a register, decrement its use count.
1112 2890 : if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
1113 : __ cache_state()->stack_state.pop_back();
1114 : }
1115 :
1116 574363 : void ReturnImpl(FullDecoder* decoder) {
1117 574363 : size_t num_returns = decoder->sig_->return_count();
1118 574363 : if (num_returns > 1) return unsupported(decoder, "multi-return");
1119 573803 : if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
1120 574200 : __ LeaveFrame(StackFrame::WASM_COMPILED);
1121 573830 : __ DropStackSlotsAndRet(
1122 573830 : static_cast<uint32_t>(descriptor_->StackParameterCount()));
1123 : }
1124 :
1125 : void DoReturn(FullDecoder* decoder, Vector<Value> /*values*/) {
1126 574379 : ReturnImpl(decoder);
1127 : }
1128 :
1129 251348 : void GetLocal(FullDecoder* decoder, Value* result,
1130 : const LocalIndexImmediate<validate>& imm) {
1131 251348 : auto& slot = __ cache_state()->stack_state[imm.index];
1132 : DCHECK_EQ(slot.type(), imm.type);
1133 251348 : switch (slot.loc()) {
1134 : case kRegister:
1135 : __ PushRegister(slot.type(), slot.reg());
1136 : break;
1137 : case kIntConst:
1138 419 : __ cache_state()->stack_state.emplace_back(imm.type, slot.i32_const());
1139 : break;
1140 : case kStack: {
1141 72033 : auto rc = reg_class_for(imm.type);
1142 72033 : LiftoffRegister reg = __ GetUnusedRegister(rc);
1143 72034 : __ Fill(reg, imm.index, imm.type);
1144 : __ PushRegister(slot.type(), reg);
1145 : break;
1146 : }
1147 : }
1148 251314 : }
1149 :
1150 0 : void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
1151 : uint32_t local_index) {
1152 : auto& state = *__ cache_state();
1153 : ValueType type = dst_slot.type();
1154 0 : if (dst_slot.is_reg()) {
1155 : LiftoffRegister slot_reg = dst_slot.reg();
1156 0 : if (state.get_use_count(slot_reg) == 1) {
1157 0 : __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
1158 : return;
1159 : }
1160 : state.dec_used(slot_reg);
1161 : dst_slot.MakeStack();
1162 : }
1163 : DCHECK_EQ(type, __ local_type(local_index));
1164 : RegClass rc = reg_class_for(type);
1165 0 : LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
1166 0 : __ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
1167 0 : dst_slot = LiftoffAssembler::VarState(type, dst_reg);
1168 : __ cache_state()->inc_used(dst_reg);
1169 : }
1170 :
1171 76759 : void SetLocal(uint32_t local_index, bool is_tee) {
1172 : auto& state = *__ cache_state();
1173 : auto& source_slot = state.stack_state.back();
1174 76759 : auto& target_slot = state.stack_state[local_index];
1175 76759 : switch (source_slot.loc()) {
1176 : case kRegister:
1177 75449 : if (target_slot.is_reg()) state.dec_used(target_slot.reg());
1178 75449 : target_slot = source_slot;
1179 75449 : if (is_tee) state.inc_used(target_slot.reg());
1180 : break;
1181 : case kIntConst:
1182 1318 : if (target_slot.is_reg()) state.dec_used(target_slot.reg());
1183 1318 : target_slot = source_slot;
1184 1318 : break;
1185 : case kStack:
1186 0 : SetLocalFromStackSlot(target_slot, local_index);
1187 0 : break;
1188 : }
1189 76759 : if (!is_tee) __ cache_state()->stack_state.pop_back();
1190 76759 : }
1191 :
1192 : void SetLocal(FullDecoder* decoder, const Value& value,
1193 : const LocalIndexImmediate<validate>& imm) {
1194 75701 : SetLocal(imm.index, false);
1195 : }
1196 :
1197 : void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
1198 : const LocalIndexImmediate<validate>& imm) {
1199 1061 : SetLocal(imm.index, true);
1200 : }
1201 :
1202 1479 : Register GetGlobalBaseAndOffset(const WasmGlobal* global,
1203 : LiftoffRegList& pinned, uint32_t* offset) {
1204 1479 : Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
1205 1478 : if (global->mutability && global->imported) {
1206 168 : LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
1207 334 : __ Load(LiftoffRegister(addr), addr, no_reg,
1208 336 : global->index * sizeof(Address), kPointerLoadType, pinned);
1209 166 : *offset = 0;
1210 : } else {
1211 1310 : LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
1212 1314 : *offset = global->offset;
1213 : }
1214 1480 : return addr;
1215 : }
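// In short: an imported mutable global is reached indirectly through the
// ImportedMutableGlobals address array (hence *offset == 0), while every
// other global lives at GlobalsStart + global->offset inside the instance's
// globals area.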
1216 :
1217 1333 : void GetGlobal(FullDecoder* decoder, Value* result,
1218 : const GlobalIndexImmediate<validate>& imm) {
1219 1333 : const auto* global = &env_->module->globals[imm.index];
1220 1333 : if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
1221 239 : return;
1222 1092 : LiftoffRegList pinned;
1223 1092 : uint32_t offset = 0;
1224 1092 : Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
1225 : LiftoffRegister value =
1226 2186 : pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
1227 1092 : LoadType type = LoadType::ForValueType(global->type);
1228 1091 : __ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
1229 1091 : __ PushRegister(global->type, value);
1230 : }
1231 :
1232 387 : void SetGlobal(FullDecoder* decoder, const Value& value,
1233 : const GlobalIndexImmediate<validate>& imm) {
1234 387 : auto* global = &env_->module->globals[imm.index];
1235 387 : if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
1236 0 : return;
1237 387 : LiftoffRegList pinned;
1238 387 : uint32_t offset = 0;
1239 387 : Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
1240 773 : LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
1241 386 : StoreType type = StoreType::ForValueType(global->type);
1242 386 : __ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
1243 : }
1244 :
1245 : void GetTable(FullDecoder* decoder, const Value& index, Value* result,
1246 : TableIndexImmediate<validate>& imm) {
1247 : unsupported(decoder, "table_get");
1248 : }
1249 :
1250 : void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
1251 : TableIndexImmediate<validate>& imm) {
1252 : unsupported(decoder, "table_set");
1253 : }
1254 :
1255 106043 : void Unreachable(FullDecoder* decoder) {
1256 : Label* unreachable_label = AddOutOfLineTrap(
1257 : decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
1258 : __ emit_jump(unreachable_label);
1259 : __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
1260 106041 : }
1261 :
1262 846 : void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
1263 : const Value& tval, Value* result) {
1264 : LiftoffRegList pinned;
1265 1694 : Register condition = pinned.set(__ PopToRegister()).gp();
1266 : ValueType type = __ cache_state()->stack_state.end()[-1].type();
1267 : DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
1268 848 : LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
1269 855 : LiftoffRegister true_value = __ PopToRegister(pinned);
1270 : LiftoffRegister dst =
1271 1716 : __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value});
1272 : __ PushRegister(type, dst);
1273 :
1274 : // Now emit the actual code to move either {true_value} or {false_value}
1275 : // into {dst}.
1276 852 : Label cont;
1277 852 : Label case_false;
1278 852 : __ emit_cond_jump(kEqual, &case_false, kWasmI32, condition);
1279 842 : if (dst != true_value) __ Move(dst, true_value, type);
1280 : __ emit_jump(&cont);
1281 :
1282 842 : __ bind(&case_false);
1283 843 : if (dst != false_value) __ Move(dst, false_value, type);
1284 844 : __ bind(&cont);
1285 851 : }
1286 :
1287 121366 : void BrImpl(Control* target) {
1288 121366 : if (!target->br_merge()->reached) {
1289 360864 : target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
1290 : target->br_merge()->arity,
1291 120288 : target->stack_depth);
1292 : }
1293 121367 : __ MergeStackWith(target->label_state, target->br_merge()->arity);
1294 121375 : __ jmp(target->label.get());
1295 121359 : }
1296 :
1297 2373 : void Br(FullDecoder* decoder, Control* target) { BrImpl(target); }
1298 :
1299 119167 : void BrOrRet(FullDecoder* decoder, uint32_t depth) {
1300 119167 : if (depth == decoder->control_depth() - 1) {
1301 175 : ReturnImpl(decoder);
1302 : } else {
1303 118992 : BrImpl(decoder->control_at(depth));
1304 : }
1305 119168 : }
1306 :
1307 105933 : void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
1308 105933 : Label cont_false;
1309 211864 : Register value = __ PopToRegister().gp();
1310 105931 : __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
1311 :
1312 105931 : BrOrRet(decoder, depth);
1313 105931 : __ bind(&cont_false);
1314 105930 : }
1315 :
1316 : // Generate a branch table case, potentially reusing previously generated
1317 : // stack transfer code.
1318 216012 : void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
1319 : std::map<uint32_t, MovableLabel>& br_targets) {
1320 216012 : MovableLabel& label = br_targets[br_depth];
1321 216024 : if (label.get()->is_bound()) {
1322 202782 : __ jmp(label.get());
1323 : } else {
1324 13242 : __ bind(label.get());
1325 13237 : BrOrRet(decoder, br_depth);
1326 : }
1327 216023 : }
1328 :
1329 : // Generate a branch table for input in [min, max).
1330 : // TODO(wasm): Generate a real branch table (like TF TableSwitch).
1331 416575 : void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
1332 : LiftoffRegister value, uint32_t min, uint32_t max,
1333 : BranchTableIterator<validate>& table_iterator,
1334 : std::map<uint32_t, MovableLabel>& br_targets) {
1335 : DCHECK_LT(min, max);
1336 : // Check base case.
1337 416575 : if (max == min + 1) {
1338 : DCHECK_EQ(min, table_iterator.cur_index());
1339 210717 : GenerateBrCase(decoder, table_iterator.next(), br_targets);
1340 210713 : return;
1341 : }
1342 :
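: // Otherwise, emit a binary search: compare against the midpoint and
: // recurse into each half, e.g. for [min, min+2):
: //   if (value >= min+1) goto upper; <case min>; upper: <case min+1>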
1343 205858 : uint32_t split = min + (max - min) / 2;
1344 205858 : Label upper_half;
1345 205858 : __ LoadConstant(tmp, WasmValue(split));
1346 : __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
1347 205862 : tmp.gp());
1348 : // Emit br table for lower half:
1349 : GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
1350 205863 : br_targets);
1351 205860 : __ bind(&upper_half);
1352 : // Emit br table for upper half:
1353 : GenerateBrTable(decoder, tmp, value, split, max, table_iterator,
1354 205859 : br_targets);
1355 : }
1356 :
1357 5310 : void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
1358 : const Value& key) {
1359 : LiftoffRegList pinned;
1360 10619 : LiftoffRegister value = pinned.set(__ PopToRegister());
1361 : BranchTableIterator<validate> table_iterator(decoder, imm);
1362 : std::map<uint32_t, MovableLabel> br_targets;
1363 :
1364 5309 : if (imm.table_count > 0) {
1365 : LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
1366 9716 : __ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
1367 4858 : Label case_default;
1368 : __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
1369 4858 : value.gp(), tmp.gp());
1370 :
1371 4857 : GenerateBrTable(decoder, tmp, value, 0, imm.table_count, table_iterator,
1372 4857 : br_targets);
1373 :
1374 4857 : __ bind(&case_default);
1375 : }
1376 :
1377 : // Generate the default case.
1378 5309 : GenerateBrCase(decoder, table_iterator.next(), br_targets);
1379 : DCHECK(!table_iterator.has_next());
1380 5316 : }
1381 :
1382 1679 : void Else(FullDecoder* decoder, Control* c) {
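: // Finish the 'then' branch: merge its state into the end merge and jump
: // over the 'else' code, then start compiling the 'else' branch from the
: // cache state captured when the 'if' was entered.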
1383 1679 : if (c->reachable()) {
1384 1419 : if (!c->end_merge.reached) {
1385 2832 : c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
1386 1416 : c->end_merge.arity, c->stack_depth);
1387 : }
1388 1423 : __ MergeFullStackWith(c->label_state, *__ cache_state());
1389 : __ emit_jump(c->label.get());
1390 : }
1391 1678 : __ bind(c->else_state->label.get());
1392 3356 : __ cache_state()->Steal(c->else_state->state);
1393 1680 : }
1394 :
1395 : Label* AddOutOfLineTrap(WasmCodePosition position,
1396 : WasmCode::RuntimeStubId stub, uint32_t pc = 0) {
1397 : DCHECK(!FLAG_wasm_no_bounds_checks);
1398 : // The pc is needed for the memory OOB trap when the trap handler is
1399 : // enabled. Other callers should not even compute it.
1400 : DCHECK_EQ(pc != 0, stub == WasmCode::kThrowWasmTrapMemOutOfBounds &&
1401 : env_->use_trap_handler);
1402 :
1403 768901 : out_of_line_code_.push_back(OutOfLineCode::Trap(stub, position, pc));
1404 : return out_of_line_code_.back().label.get();
1405 : }
1406 :
1407 : // Returns true if the memory access is statically known to be out of bounds
1408 : // Returns true if the memory access is statically known to be out of bounds
1409 : // (in which case a jump to the trap was generated); returns false otherwise.
1410 : uint32_t offset, Register index, LiftoffRegList pinned) {
1411 : const bool statically_oob =
1412 198739 : !IsInBounds(offset, access_size, env_->max_memory_size);
1413 :
1414 198739 : if (!statically_oob &&
1415 197227 : (FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
1416 : return false;
1417 : }
1418 :
1419 : // TODO(wasm): This adds protected instruction information for the jump
1420 : // instruction we are about to generate. It would be better to just not add
1421 : // protected instruction info when the pc is 0.
1422 3212 : Label* trap_label = AddOutOfLineTrap(
1423 : decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds,
1424 1626 : env_->use_trap_handler ? __ pc_offset() : 0);
1425 :
1426 1633 : if (statically_oob) {
1427 : __ emit_jump(trap_label);
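: // Code following the unconditional trap must still validate, but can
: // never execute; mark the current block as spec-only reachable.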
1428 : Control* current_block = decoder->control_at(0);
1429 1578 : if (current_block->reachable()) {
1430 1580 : current_block->reachability = kSpecOnlyReachable;
1431 : }
1432 : return true;
1433 : }
1434 :
1435 : DCHECK(!env_->use_trap_handler);
1436 : DCHECK(!FLAG_wasm_no_bounds_checks);
1437 :
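: // {end_offset} is the offset of the last byte accessed. Computing it in
: // uint64_t means the addition cannot overflow.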
1438 32 : uint64_t end_offset = uint64_t{offset} + access_size - 1u;
1439 :
1440 : // If the end offset is larger than the smallest memory, dynamically check
1441 : // the end offset against the actual memory size, which is not known at
1442 : // compile time. Otherwise, only one check is required (see below).
1443 : LiftoffRegister end_offset_reg =
1444 32 : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1445 : Register mem_size = __ GetUnusedRegister(kGpReg, pinned).gp();
1446 32 : LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
1447 :
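: // Since the access is not statically OOB, {end_offset} is below the
: // maximum memory size, so truncating it to uint32_t on 32-bit platforms
: // is lossless.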
1448 : if (kSystemPointerSize == 8) {
1449 32 : __ LoadConstant(end_offset_reg, WasmValue(end_offset));
1450 : } else {
1451 : __ LoadConstant(end_offset_reg,
1452 : WasmValue(static_cast<uint32_t>(end_offset)));
1453 : }
1454 :
1455 32 : if (end_offset >= env_->min_memory_size) {
1456 : __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
1457 : LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
1458 15 : mem_size);
1459 : }
1460 :
1461 : // Just reuse the end_offset register for computing the effective size.
1462 : LiftoffRegister effective_size_reg = end_offset_reg;
1463 : __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size, end_offset_reg.gp());
1464 :
1465 : __ emit_i32_to_intptr(index, index);
1466 :
1467 : __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
1468 : LiftoffAssembler::kWasmIntPtr, index,
1469 32 : effective_size_reg.gp());
1470 32 : return false;
1471 : }
1472 :
1473 19 : void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
1474 : Register index, uint32_t offset,
1475 : WasmCodePosition position) {
1476 : // Before making the runtime call, spill all cache registers.
1477 19 : __ SpillAllRegisters();
1478 :
1479 : LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
1480 : // Get one register for computing the address (offset + index).
1481 : LiftoffRegister address = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1482 : // Compute offset+index in address.
1483 20 : __ LoadConstant(address, WasmValue(offset));
1484 19 : __ emit_i32_add(address.gp(), address.gp(), index);
1485 :
1486 : // Get a register to hold the stack slot for MemoryTracingInfo.
1487 : LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1488 : // Allocate stack slot for MemoryTracingInfo.
1489 19 : __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
1490 :
1491 : // Now store all information into the MemoryTracingInfo struct.
1492 20 : __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
1493 19 : StoreType::kI32Store, pinned);
1494 40 : __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
1495 20 : __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
1496 20 : StoreType::kI32Store8, pinned);
1497 20 : __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
1498 20 : __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
1499 20 : StoreType::kI32Store8, pinned);
1500 :
1501 40 : source_position_table_builder_.AddPosition(__ pc_offset(),
1502 20 : SourcePosition(position), false);
1503 :
1504 20 : Register args[] = {info.gp()};
1505 20 : GenerateRuntimeCall(Runtime::kWasmTraceMemory, arraysize(args), args);
1506 : __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
1507 19 : }
1508 :
1509 20 : void GenerateRuntimeCall(Runtime::FunctionId runtime_function, int num_args,
1510 : Register* args) {
1511 38 : auto call_descriptor = compiler::Linkage::GetRuntimeCallDescriptor(
1512 : compilation_zone_, runtime_function, num_args,
1513 20 : compiler::Operator::kNoProperties, compiler::CallDescriptor::kNoFlags);
1514 : // Currently, only one argument is supported. More arguments would require
1515 : // care with the parallel register moves (reuse StackTransferRecipe).
1516 : DCHECK_EQ(1, num_args);
1517 : constexpr size_t kInputShift = 1; // Input 0 is the call target.
1518 : compiler::LinkageLocation param_loc =
1519 : call_descriptor->GetInputLocation(kInputShift);
1520 18 : if (param_loc.IsRegister()) {
1521 : Register reg = Register::from_code(param_loc.AsRegister());
1522 0 : __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]),
1523 0 : LiftoffAssembler::kWasmIntPtr);
1524 : } else {
1525 : DCHECK(param_loc.IsCallerFrameSlot());
1526 18 : LiftoffStackSlots stack_slots(&asm_);
1527 18 : stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
1528 : LiftoffRegister(args[0])));
1529 18 : stack_slots.Construct();
1530 : }
1531 :
1532 : // Set context to "no context" for the runtime call.
1533 18 : __ TurboAssembler::Move(kContextRegister,
1534 18 : Smi::FromInt(Context::kNoContext));
1535 20 : Register centry = kJavaScriptCallCodeStartRegister;
1536 20 : LOAD_TAGGED_PTR_INSTANCE_FIELD(centry, CEntryStub);
1537 20 : __ CallRuntimeWithCEntry(runtime_function, centry);
1538 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
1539 19 : Safepoint::kNoLazyDeopt);
1540 19 : }
1541 :
1542 197187 : Register AddMemoryMasking(Register index, uint32_t* offset,
1543 : LiftoffRegList& pinned) {
1544 197187 : if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
1545 197187 : return index;
1546 : }
1547 : DEBUG_CODE_COMMENT("Mask memory index");
1548 : // Make sure that we can overwrite {index}.
1549 0 : if (__ cache_state()->is_used(LiftoffRegister(index))) {
1550 : Register old_index = index;
1551 : pinned.clear(LiftoffRegister(old_index));
1552 0 : index = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1553 0 : if (index != old_index) __ Move(index, old_index, kWasmI32);
1554 : }
1555 0 : Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
1556 0 : __ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
1557 : __ emit_ptrsize_add(index, index, tmp);
1558 0 : LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
1559 : __ emit_ptrsize_and(index, index, tmp);
1560 0 : *offset = 0;
1561 0 : return index;
1562 : }
1563 :
1564 85335 : void LoadMem(FullDecoder* decoder, LoadType type,
1565 : const MemoryAccessImmediate<validate>& imm,
1566 : const Value& index_val, Value* result) {
1567 : ValueType value_type = type.value_type();
1568 85335 : if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "load"))
1569 860 : return;
1570 85338 : LiftoffRegList pinned;
1571 170676 : Register index = pinned.set(__ PopToRegister()).gp();
1572 170676 : if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned)) {
1573 : return;
1574 : }
1575 84480 : uint32_t offset = imm.offset;
1576 84480 : index = AddMemoryMasking(index, &offset, pinned);
1577 : DEBUG_CODE_COMMENT("Load from memory");
1578 84483 : Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1579 84483 : LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
1580 : RegClass rc = reg_class_for(value_type);
1581 : LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
1582 84512 : uint32_t protected_load_pc = 0;
1583 84512 : __ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
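: // With the trap handler in use, the load above doubles as the bounds
: // check: record its pc as a protected instruction so that a fault there
: // is turned into the OOB trap.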
1584 84494 : if (env_->use_trap_handler) {
1585 : AddOutOfLineTrap(decoder->position(),
1586 : WasmCode::kThrowWasmTrapMemOutOfBounds,
1587 84458 : protected_load_pc);
1588 : }
1589 : __ PushRegister(value_type, value);
1590 :
1591 84501 : if (FLAG_trace_wasm_memory) {
1592 : TraceMemoryOperation(false, type.mem_type().representation(), index,
1593 24 : offset, decoder->position());
1594 : }
1595 : }
1596 :
1597 113541 : void StoreMem(FullDecoder* decoder, StoreType type,
1598 : const MemoryAccessImmediate<validate>& imm,
1599 : const Value& index_val, const Value& value_val) {
1600 : ValueType value_type = type.value_type();
1601 113541 : if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "store"))
1602 744 : return;
1603 113541 : LiftoffRegList pinned;
1604 227062 : LiftoffRegister value = pinned.set(__ PopToRegister());
1605 113521 : Register index = pinned.set(__ PopToRegister(pinned)).gp();
1606 227070 : if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned)) {
1607 : return;
1608 : }
1609 112804 : uint32_t offset = imm.offset;
1610 112804 : index = AddMemoryMasking(index, &offset, pinned);
1611 : DEBUG_CODE_COMMENT("Store to memory");
1612 112844 : Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1613 112844 : LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
1614 112851 : uint32_t protected_store_pc = 0;
1615 : LiftoffRegList outer_pinned;
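: // If we trace this store below, {index} is needed again afterwards, so
: // keep it pinned across the store.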
1616 112851 : if (FLAG_trace_wasm_memory) outer_pinned.set(index);
1617 : __ Store(addr, index, offset, value, type, outer_pinned,
1618 112851 : &protected_store_pc, true);
1619 112907 : if (env_->use_trap_handler) {
1620 : AddOutOfLineTrap(decoder->position(),
1621 : WasmCode::kThrowWasmTrapMemOutOfBounds,
1622 112919 : protected_store_pc);
1623 : }
1624 112909 : if (FLAG_trace_wasm_memory) {
1625 : TraceMemoryOperation(true, type.mem_rep(), index, offset,
1626 16 : decoder->position());
1627 : }
1628 : }
1629 :
1630 492 : void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
1631 492 : Register mem_size = __ GetUnusedRegister(kGpReg).gp();
1632 492 : LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
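: // Convert the memory size from bytes to wasm pages (64 KiB each).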
1633 : __ emit_ptrsize_shr(mem_size, mem_size, kWasmPageSizeLog2);
1634 : __ PushRegister(kWasmI32, LiftoffRegister(mem_size));
1635 492 : }
1636 :
1637 1319 : void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) {
1638 : // Pop the input, then spill all cache registers to make the runtime call.
1639 : LiftoffRegList pinned;
1640 2638 : LiftoffRegister input = pinned.set(__ PopToRegister());
1641 1319 : __ SpillAllRegisters();
1642 :
1643 : constexpr Register kGpReturnReg = kGpReturnRegisters[0];
1644 : static_assert(kLiftoffAssemblerGpCacheRegs & Register::bit<kGpReturnReg>(),
1645 : "first return register is a cache register (needs more "
1646 : "complex code here otherwise)");
1647 : LiftoffRegister result = pinned.set(LiftoffRegister(kGpReturnReg));
1648 :
1649 : WasmMemoryGrowDescriptor descriptor;
1650 : DCHECK_EQ(0, descriptor.GetStackParameterCount());
1651 : DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
1652 : DCHECK_EQ(ValueTypes::MachineTypeFor(kWasmI32),
1653 : descriptor.GetParameterType(0));
1654 :
1655 : Register param_reg = descriptor.GetRegisterParameter(0);
1656 1318 : if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
1657 :
1658 : __ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
1659 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
1660 1319 : Safepoint::kNoLazyDeopt);
1661 :
1662 : if (kReturnRegister0 != result.gp()) {
1663 : __ Move(result.gp(), kReturnRegister0, kWasmI32);
1664 : }
1665 :
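: // The stub returns the old memory size in pages, or -1 if the grow
: // failed, which is exactly the value memory.grow must produce.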
1666 : __ PushRegister(kWasmI32, result);
1667 1319 : }
1668 :
1669 121879 : void CallDirect(FullDecoder* decoder,
1670 : const CallFunctionImmediate<validate>& imm,
1671 : const Value args[], Value returns[]) {
1672 121879 : if (imm.sig->return_count() > 1)
1673 : return unsupported(decoder, "multi-return");
1674 235984 : if (imm.sig->return_count() == 1 &&
1675 114660 : !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
1676 : "return"))
1677 : return;
1678 :
1679 : auto call_descriptor =
1680 121328 : compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
1681 : call_descriptor =
1682 : GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
1683 :
1684 121409 : if (imm.index < env_->module->num_imported_functions) {
1685 : // A direct call to an imported function.
1686 : LiftoffRegList pinned;
1687 112784 : Register tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1688 112784 : Register target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1689 :
1690 112784 : Register imported_targets = tmp;
1691 112784 : LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
1692 : kSystemPointerSize);
1693 225570 : __ Load(LiftoffRegister(target), imported_targets, no_reg,
1694 225570 : imm.index * sizeof(Address), kPointerLoadType, pinned);
1695 :
1696 112785 : Register imported_function_refs = tmp;
1697 112785 : LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs,
1698 : ImportedFunctionRefs);
1699 112785 : Register imported_function_ref = tmp;
1700 112785 : __ LoadTaggedPointer(
1701 : imported_function_ref, imported_function_refs, no_reg,
1702 225570 : ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
1703 :
1704 : Register* explicit_instance = &imported_function_ref;
1705 112785 : __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
1706 225570 : source_position_table_builder_.AddPosition(
1707 112785 : __ pc_offset(), SourcePosition(decoder->position()), false);
1708 :
1709 112785 : __ CallIndirect(imm.sig, call_descriptor, target);
1710 :
1711 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
1712 112785 : Safepoint::kNoLazyDeopt);
1713 :
1714 112785 : __ FinishCall(imm.sig, call_descriptor);
1715 : } else {
1716 : // A direct call within this module just gets the current instance.
1717 8625 : __ PrepareCall(imm.sig, call_descriptor);
1718 :
1719 17232 : source_position_table_builder_.AddPosition(
1720 8549 : __ pc_offset(), SourcePosition(decoder->position()), false);
1721 :
1722 : // Just encode the function index. This will be patched at instantiation.
1723 8683 : Address addr = static_cast<Address>(imm.index);
1724 : __ CallNativeWasmCode(addr);
1725 :
1726 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
1727 8544 : Safepoint::kNoLazyDeopt);
1728 :
1729 8617 : __ FinishCall(imm.sig, call_descriptor);
1730 : }
1731 : }
1732 :
1733 2737 : void CallIndirect(FullDecoder* decoder, const Value& index_val,
1734 : const CallIndirectImmediate<validate>& imm,
1735 : const Value args[], Value returns[]) {
1736 2737 : if (imm.sig->return_count() > 1) {
1737 47 : return unsupported(decoder, "multi-return");
1738 : }
1739 2737 : if (imm.table_index != 0) {
1740 : return unsupported(decoder, "table index != 0");
1741 : }
1742 4207 : if (imm.sig->return_count() == 1 &&
1743 1516 : !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
1744 : "return")) {
1745 : return;
1746 : }
1747 :
1748 : // Pop the index.
1749 5382 : Register index = __ PopToRegister().gp();
1750 : // If that register is still in use after popping, move the index to an
1751 : // unused register, because we are going to modify it below.
1752 2691 : if (__ cache_state()->is_used(LiftoffRegister(index))) {
1753 : Register new_index =
1754 : __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index)).gp();
1755 564 : __ Move(new_index, index, kWasmI32);
1756 : index = new_index;
1757 : }
1758 :
1759 : LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
1760 : // Get three temporary registers.
1761 : Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1762 2691 : Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1763 : Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
1764 :
1765 : // Bounds check against the table size.
1766 : Label* invalid_func_label = AddOutOfLineTrap(
1767 : decoder->position(), WasmCode::kThrowWasmTrapFuncInvalid);
1768 :
1769 5388 : uint32_t canonical_sig_num = env_->module->signature_ids[imm.sig_index];
1770 : DCHECK_GE(canonical_sig_num, 0);
1771 : DCHECK_GE(kMaxInt, canonical_sig_num);
1772 :
1773 : // Compare against table size stored in
1774 : // {instance->indirect_function_table_size}.
1775 2694 : LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size);
1776 : __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
1777 2694 : index, tmp_const);
1778 :
1779 : // Mask the index to prevent SSCA (speculative side-channel attacks).
1780 2692 : if (FLAG_untrusted_code_mitigations) {
1781 : DEBUG_CODE_COMMENT("Mask indirect call index");
1782 : // mask = ((index - size) & ~index) >> 31
1783 : // Reuse allocated registers; note: size is still stored in {tmp_const}.
1784 : Register diff = table;
1785 0 : Register neg_index = tmp_const;
1786 : Register mask = scratch;
1787 : // 1) diff = index - size
1788 0 : __ emit_i32_sub(diff, index, tmp_const);
1789 : // 2) neg_index = ~index
1790 0 : __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
1791 : __ emit_i32_xor(neg_index, neg_index, index);
1792 : // 3) mask = diff & neg_index
1793 : __ emit_i32_and(mask, diff, neg_index);
1794 : // 4) mask = mask >> 31
1795 0 : __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(int32_t{31}));
1796 : __ emit_i32_sar(mask, mask, tmp_const, pinned);
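: // For an in-bounds index the difference is negative and ~index has its
: // sign bit set, so the arithmetic shift yields all ones and the index
: // passes through unchanged; a misspeculated out-of-bounds index gets a
: // zero mask and is clamped to 0.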
1797 :
1798 : // Apply mask.
1799 : __ emit_i32_and(index, index, mask);
1800 : }
1801 :
1802 : DEBUG_CODE_COMMENT("Check indirect call signature");
1803 : // Load the signature from {instance->ift_sig_ids[key]}.
1804 2692 : LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
1805 : // Multiply {index} by 4 to represent kInt32Size items.
1806 : STATIC_ASSERT(kInt32Size == 4);
1807 : // TODO(wasm): use an emit_i32_shli() instead of two adds.
1808 : // (currently cannot use shl on ia32/x64 because it clobbers %rcx).
1809 2689 : __ emit_i32_add(index, index, index);
1810 2688 : __ emit_i32_add(index, index, index);
1811 2690 : __ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
1812 2688 : pinned);
1813 :
1814 : // Compare against expected signature.
1815 2690 : __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(canonical_sig_num));
1816 :
1817 : Label* sig_mismatch_label = AddOutOfLineTrap(
1818 : decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
1819 : __ emit_cond_jump(kUnequal, sig_mismatch_label,
1820 2694 : LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
1821 :
1822 : // At this point {index} has already been multiplied by 4.
1823 : DEBUG_CODE_COMMENT("Execute indirect call");
1824 : if (kTaggedSize != kInt32Size) {
1825 : DCHECK_EQ(kTaggedSize, kInt32Size * 2);
1826 : // Multiply {index} by another 2 to represent kTaggedSize items.
1827 : __ emit_i32_add(index, index, index);
1828 : }
1829 : // At this point {index} has already been multiplied by kTaggedSize.
1830 :
1831 : // Load the ref (typically the callee instance) from {instance->ift_refs[key]}.
1832 2694 : LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs);
1833 : __ LoadTaggedPointer(tmp_const, table, index,
1834 : ObjectAccess::ElementOffsetInTaggedFixedArray(0),
1835 2693 : pinned);
1836 :
1837 : if (kTaggedSize != kSystemPointerSize) {
1838 : DCHECK_EQ(kSystemPointerSize, kTaggedSize * 2);
1839 : // Multiply {index} by another 2 to represent kSystemPointerSize items.
1840 2693 : __ emit_i32_add(index, index, index);
1841 : }
1842 : // At this point {index} has already been multiplied by kSystemPointerSize.
1843 :
1844 : Register* explicit_instance = &tmp_const;
1845 :
1846 : // Load the target from {instance->ift_targets[key]}.
1847 2693 : LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
1848 : kSystemPointerSize);
1849 2694 : __ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
1850 2695 : pinned);
1851 :
1852 5386 : source_position_table_builder_.AddPosition(
1853 2694 : __ pc_offset(), SourcePosition(decoder->position()), false);
1854 :
1855 : auto call_descriptor =
1856 2692 : compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
1857 : call_descriptor =
1858 : GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
1859 :
1860 2690 : Register target = scratch;
1861 2690 : __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
1862 2695 : __ CallIndirect(imm.sig, call_descriptor, target);
1863 :
1864 : safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
1865 2692 : Safepoint::kNoLazyDeopt);
1866 :
1867 2690 : __ FinishCall(imm.sig, call_descriptor);
1868 : }
1869 :
1870 : void ReturnCall(FullDecoder* decoder,
1871 : const CallFunctionImmediate<validate>& imm,
1872 : const Value args[]) {
1873 : unsupported(decoder, "return_call");
1874 : }
1875 : void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
1876 : const CallIndirectImmediate<validate>& imm,
1877 : const Value args[]) {
1878 : unsupported(decoder, "return_call_indirect");
1879 : }
1880 : void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
1881 : Value* result) {
1882 : unsupported(decoder, "simd");
1883 : }
1884 : void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
1885 : const SimdLaneImmediate<validate>& imm,
1886 : const Vector<Value> inputs, Value* result) {
1887 : unsupported(decoder, "simd");
1888 : }
1889 : void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
1890 : const SimdShiftImmediate<validate>& imm, const Value& input,
1891 : Value* result) {
1892 : unsupported(decoder, "simd");
1893 : }
1894 : void Simd8x16ShuffleOp(FullDecoder* decoder,
1895 : const Simd8x16ShuffleImmediate<validate>& imm,
1896 : const Value& input0, const Value& input1,
1897 : Value* result) {
1898 : unsupported(decoder, "simd");
1899 : }
1900 : void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
1901 : const Vector<Value>& args) {
1902 : unsupported(decoder, "throw");
1903 : }
1904 : void Rethrow(FullDecoder* decoder, const Value& exception) {
1905 : unsupported(decoder, "rethrow");
1906 : }
1907 : void BrOnException(FullDecoder* decoder, const Value& exception,
1908 : const ExceptionIndexImmediate<validate>& imm,
1909 : uint32_t depth, Vector<Value> values) {
1910 : unsupported(decoder, "br_on_exn");
1911 : }
1912 : void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
1913 : const MemoryAccessImmediate<validate>& imm, Value* result) {
1914 : unsupported(decoder, "atomicop");
1915 : }
1916 : void MemoryInit(FullDecoder* decoder,
1917 : const MemoryInitImmediate<validate>& imm, const Value& dst,
1918 : const Value& src, const Value& size) {
1919 : unsupported(decoder, "memory.init");
1920 : }
1921 : void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
1922 : unsupported(decoder, "data.drop");
1923 : }
1924 : void MemoryCopy(FullDecoder* decoder,
1925 : const MemoryCopyImmediate<validate>& imm, const Value& dst,
1926 : const Value& src, const Value& size) {
1927 : unsupported(decoder, "memory.copy");
1928 : }
1929 : void MemoryFill(FullDecoder* decoder,
1930 : const MemoryIndexImmediate<validate>& imm, const Value& dst,
1931 : const Value& value, const Value& size) {
1932 : unsupported(decoder, "memory.fill");
1933 : }
1934 : void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
1935 : Vector<Value> args) {
1936 : unsupported(decoder, "table.init");
1937 : }
1938 : void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
1939 : unsupported(decoder, "elem.drop");
1940 : }
1941 : void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
1942 : Vector<Value> args) {
1943 : unsupported(decoder, "table.copy");
1944 : }
1945 :
1946 : private:
1947 : LiftoffAssembler asm_;
1948 : compiler::CallDescriptor* const descriptor_;
1949 : CompilationEnv* const env_;
1950 : bool ok_ = true;
1951 : std::vector<OutOfLineCode> out_of_line_code_;
1952 : SourcePositionTableBuilder source_position_table_builder_;
1953 : std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
1954 : // Zone used to store information during compilation. The result will be
1955 : // stored independently, so that this zone can die together with the
1956 : // LiftoffCompiler after compilation.
1957 : Zone* compilation_zone_;
1958 : SafepointTableBuilder safepoint_table_builder_;
1959 : // The pc offset of the instructions reserving the stack frame. Needed to
1960 : // patch in the actually required stack size at the end of compilation.
1961 : uint32_t pc_offset_stack_frame_construction_ = 0;
1962 :
1963 : void TraceCacheState(FullDecoder* decoder) const {
1964 : #ifdef DEBUG
1965 : if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
1966 : StdoutStream os;
1967 : for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
1968 : --control_depth) {
1969 : auto* cache_state =
1970 : control_depth == -1 ? __ cache_state()
1971 : : &decoder->control_at(control_depth)
1972 : ->label_state;
1973 : os << PrintCollection(cache_state->stack_state);
1974 : if (control_depth != -1) PrintF("; ");
1975 : }
1976 : os << "\n";
1977 : #endif
1978 : }
1979 :
1980 : DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
1981 : };
1982 :
1983 : } // namespace
1984 :
1985 594672 : WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
1986 : CompilationEnv* env, const FunctionBody& func_body, Counters* counters,
1987 : WasmFeatures* detected) {
1988 1785063 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
1989 : "ExecuteLiftoffCompilation");
1990 : base::ElapsedTimer compile_timer;
1991 : if (FLAG_trace_wasm_decode_time) {
1992 : compile_timer.Start();
1993 : }
1994 :
1995 1784920 : Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
1996 594598 : const WasmModule* module = env ? env->module : nullptr;
1997 594598 : auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
1998 : base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
1999 : base::in_place, counters->liftoff_compile_time());
2000 : std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
2001 1189690 : wasm::WasmInstructionBuffer::New();
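: // The decoder validates the function body and drives the LiftoffCompiler
: // callbacks, emitting machine code in a single pass.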
2002 : WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
2003 595292 : &zone, module, env->enabled_features, detected, func_body,
2004 2378914 : call_descriptor, env, &zone, instruction_buffer->CreateView());
2005 594165 : decoder.Decode();
2006 : liftoff_compile_time_scope.reset();
2007 : LiftoffCompiler* compiler = &decoder.interface();
2008 593716 : if (decoder.failed()) {
2009 : compiler->OnFirstError(&decoder);
2010 25447 : return WasmCompilationResult{};
2011 : }
2012 568269 : if (!compiler->ok()) {
2013 : // Liftoff compilation failed.
2014 0 : counters->liftoff_unsupported_functions()->Increment();
2015 0 : return WasmCompilationResult{};
2016 : }
2017 :
2018 568269 : counters->liftoff_compiled_functions()->Increment();
2019 :
2020 : if (FLAG_trace_wasm_decode_time) {
2021 : double compile_ms = compile_timer.Elapsed().InMillisecondsF();
2022 : PrintF(
2023 : "wasm-compilation liftoff phase 1 ok: %u bytes, %0.3f ms decode and "
2024 : "compile\n",
2025 : static_cast<unsigned>(func_body.end - func_body.start), compile_ms);
2026 : }
2027 :
2028 568689 : WasmCompilationResult result;
2029 : compiler->GetCode(&result.code_desc);
2030 1136709 : result.instr_buffer = instruction_buffer->ReleaseBuffer();
2031 568433 : result.source_positions = compiler->GetSourcePositionTable();
2032 568669 : result.protected_instructions = compiler->GetProtectedInstructions();
2033 568669 : result.frame_slot_count = compiler->GetTotalFrameSlotCount();
2034 568669 : result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
2035 568689 : result.result_tier = ExecutionTier::kBaseline;
2036 :
2037 : DCHECK(result.succeeded());
2038 : return result;
2039 : }
2040 :
2041 : #undef __
2042 : #undef TRACE
2043 : #undef WASM_INSTANCE_OBJECT_FIELD_OFFSET
2044 : #undef WASM_INSTANCE_OBJECT_FIELD_SIZE
2045 : #undef LOAD_INSTANCE_FIELD
2046 : #undef LOAD_TAGGED_PTR_INSTANCE_FIELD
2047 : #undef DEBUG_CODE_COMMENT
2048 :
2049 : } // namespace wasm
2050 : } // namespace internal
2051 120216 : } // namespace v8