Line data Source code
1 : // Copyright 2015 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/interpreter/interpreter-assembler.h"
6 :
7 : #include <limits>
8 : #include <ostream>
9 :
10 : #include "src/code-factory.h"
11 : #include "src/frames.h"
12 : #include "src/interface-descriptors.h"
13 : #include "src/interpreter/bytecodes.h"
14 : #include "src/interpreter/interpreter.h"
15 : #include "src/machine-type.h"
16 : #include "src/objects-inl.h"
17 : #include "src/zone/zone.h"
18 :
19 : namespace v8 {
20 : namespace internal {
21 : namespace interpreter {
22 :
23 : using compiler::CodeAssemblerState;
24 : using compiler::Node;
25 : template <class T>
26 : using TNode = compiler::TNode<T>;
27 :
// Constructs the assembler used to generate the handler for |bytecode| at
// |operand_scale|. Interpreter dispatch state (frame pointer, bytecode
// array, bytecode offset, dispatch table, accumulator) is tracked in
// variables seeded from the InterpreterDispatchDescriptor parameters, and
// call prologue/epilogue callbacks are registered so every generated call
// goes through CallPrologue()/CallEpilogue().
InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      VARIABLE_CONSTRUCTOR(interpreted_frame_pointer_,
                           MachineType::PointerRepresentation()),
      VARIABLE_CONSTRUCTOR(
          bytecode_array_, MachineRepresentation::kTagged,
          Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
      VARIABLE_CONSTRUCTOR(
          bytecode_offset_, MachineType::PointerRepresentation(),
          Parameter(InterpreterDispatchDescriptor::kBytecodeOffset)),
      VARIABLE_CONSTRUCTOR(
          dispatch_table_, MachineType::PointerRepresentation(),
          Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
      VARIABLE_CONSTRUCTOR(
          accumulator_, MachineRepresentation::kTagged,
          Parameter(InterpreterDispatchDescriptor::kAccumulator)),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true),
      disable_stack_check_across_call_(false),
      stack_pointer_before_call_(nullptr) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if bytecode will make a call along the
  // critical path, or it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}
67 :
// Verifies that the handler used the accumulator exactly as declared for
// this bytecode, then detaches the call prologue/epilogue callbacks
// registered in the constructor.
InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}
75 :
// Returns the interpreted frame pointer, binding it lazily on first use.
// For bytecodes that make a call along the critical path, the cached value
// is re-read from the parent frame pointer once after a call has been made.
Node* InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}
86 :
// Returns the current bytecode offset. If this bytecode makes a call along
// the critical path and a call has been generated, the offset is reloaded
// from the interpreter frame; the "variable still holds the raw dispatch
// parameter" comparison ensures the reload happens at most once.
Node* InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_.Bind(ReloadBytecodeOffset());
  }
  return bytecode_offset_.value();
}
95 :
// Re-reads the bytecode offset from its dedicated interpreter frame
// register. The spilled value for scaled bytecodes points at the
// Wide/ExtraWide prefix (see SaveBytecodeOffset), so it is advanced by one.
Node* InterpreterAssembler::ReloadBytecodeOffset() {
  Node* offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}
105 :
106 118522 : void InterpreterAssembler::SaveBytecodeOffset() {
107 59261 : Node* offset = BytecodeOffset();
108 59261 : if (operand_scale() != OperandScale::kSingle) {
109 : // Subtract one from the offset such that it points to the Wide / ExtraWide
110 : // prefix bytecode.
111 152952 : offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
112 : }
113 59261 : StoreAndTagRegister(offset, Register::bytecode_offset());
114 59261 : }
115 :
// Returns the tagged pointer to the current BytecodeArray, re-reading it
// from its frame register if a call invalidated the cached value.
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the debugger
  // has been activated.
  if (!bytecode_array_valid_) {
    bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}
125 :
// Returns the raw pointer to the dispatch table. After a call on the
// critical path the value is re-materialized from the isolate's external
// reference; the comparison against the raw dispatch parameter ensures the
// rebinding happens at most once.
Node* InterpreterAssembler::DispatchTableRawPointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_.Bind(ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate())));
  }
  return dispatch_table_.value();
}
135 :
// Returns the accumulator without recording the read in accumulator_use_
// and without speculation poisoning.
Node* InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

// Returns the (speculation-poisoned) accumulator value, recording the read
// so the destructor can check usage against the bytecode's declaration.
Node* InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
  return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}

// Sets the accumulator to |value|, recording the write for the same
// destructor check.
void InterpreterAssembler::SetAccumulator(Node* value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
  accumulator_.Bind(value);
}
151 :
// Loads the current context from its interpreter frame register.
Node* InterpreterAssembler::GetContext() {
  return LoadRegister(Register::current_context());
}

// Stores |value| as the current context in the interpreter frame.
void InterpreterAssembler::SetContext(Node* value) {
  StoreRegister(value, Register::current_context());
}
159 :
// Walks |depth| steps up the context chain from |context| via each
// context's PREVIOUS_INDEX slot and returns the context reached. A depth of
// zero returns |context| itself via the fast path.
Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Label context_found(this);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}
189 :
// Walks up to |depth| contexts starting at |context| and jumps to |target|
// as soon as any context on the way has a non-hole EXTENSION_INDEX slot.
// Falls through when no extension is found within |depth| steps.
void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
                                                              Node* depth,
                                                              Label* target) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Loop until the depth is 0.
  Goto(&context_search);
  BIND(&context_search);
  {
    // TODO(leszeks): We only need to do this check if the context had a sloppy
    // eval, we could pass in a context chain bitmask to figure out which
    // contexts actually need to be checked.

    Node* extension_slot =
        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);

    // Jump to the target if the extension slot is not a hole.
    GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);

    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
           &context_search);
  }
}
224 :
// Returns the (speculation-poisoned) address of register |reg_index| within
// the interpreted frame.
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
  return WordPoisonOnSpeculation(
      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
}

// Convenience overload for a statically-known register.
Node* InterpreterAssembler::RegisterLocation(Register reg) {
  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}

// Converts a register index into a byte offset from the frame pointer.
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
  return TimesSystemPointerSize(index);
}
237 :
// Loads the tagged value in register |reg_index| of the interpreted frame.
// Marked as a critical load for speculation poisoning.
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
              RegisterFrameOffset(reg_index), LoadSensitivity::kCritical);
}

// Loads the tagged value of the statically-known register |reg|.
Node* InterpreterAssembler::LoadRegister(Register reg) {
  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
              IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2));
}

// Loads a Smi from register |reg| and returns the untagged value.
Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  return LoadAndUntagSmi(GetInterpretedFramePointer(),
                         reg.ToOperand() << kSystemPointerSizeLog2);
}
252 :
// Loads the register named by the register operand at |operand_index|.
Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
  return LoadRegister(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

// Loads the consecutive register pair named by the kRegPair operand at
// |operand_index|; the second register is the next one (index minus one,
// since register indexes are negative).
std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kRegPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  Node* second_reg_index = NextRegister(first_reg_index);
  return std::make_pair(LoadRegister(first_reg_index),
                        LoadRegister(second_reg_index));
}
268 :
// Decodes a register-list operand (base register plus the following
// kRegCount operand) into a RegListNodePair of base location and count.
InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  Node* base_reg = RegisterLocation(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
  Node* reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}

// Loads the |index|th register of |reg_list|.
Node* InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  Node* location = RegisterLocationInRegisterList(reg_list, index);
  // Location is already poisoned on speculation, so no need to poison here.
  return Load(MachineType::AnyTagged(), location);
}

// Computes the address of the |index|th register of |reg_list|.
Node* InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_ASSERT(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  Node* offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return IntPtrSub(reg_list.base_reg_location(), offset);
}
297 :
// Stores |value| into the statically-known register |reg| of the
// interpreted frame (no write barrier).
void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
  StoreNoWriteBarrier(
      MachineRepresentation::kTagged, GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2), value);
}

// Stores |value| into the dynamically-computed register |reg_index|.
void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
  StoreNoWriteBarrier(MachineRepresentation::kTagged,
                      GetInterpretedFramePointer(),
                      RegisterFrameOffset(reg_index), value);
}

// Smi-tags |value| and stores it into register |reg|.
void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
  int offset = reg.ToOperand() << kSystemPointerSizeLog2;
  StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
}
314 :
// Stores |value| into the register named by the operand at |operand_index|.
void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
                                                       int operand_index) {
  StoreRegister(value,
                BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

// Stores |value1| and |value2| into the consecutive register pair named by
// the kRegOutPair operand at |operand_index|.
void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
                                                           Node* value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  Node* second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}

// Stores three values into the consecutive register triple named by the
// kRegOutTriple operand at |operand_index|.
void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    Node* value1, Node* value2, Node* value3, int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  Node* second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  Node* third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}
345 :
// Returns the index of the register following |reg_index|.
Node* InterpreterAssembler::NextRegister(Node* reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return IntPtrAdd(reg_index, IntPtrConstant(-1));
}

// Returns the constant byte offset of operand |operand_index| within the
// current bytecode at the current operand scale.
Node* InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}
355 :
// Loads the single-byte unsigned operand at |operand_index| from the
// bytecode stream.
Node* InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
}

// Loads the single-byte signed operand at |operand_index| from the
// bytecode stream.
Node* InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
}
375 :
// Assembles a 16- or 32-bit operand read one byte at a time for targets
// without unaligned memory access, then shifts and ors the bytes together.
// The most significant byte is loaded with the signed/unsigned byte type
// matching |result_type| so sign extension happens exactly once.
Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type,
    LoadSensitivity needs_poisoning) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
      break;
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant bytecode into bytes[0] and then in order
  // down to least significant in bytes[count - 1].
  DCHECK_LE(count, kMaxCount);
  Node* bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset,
                    needs_poisoning);
  }

  // Pack LSB to MSB.
  Node* result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    Node* shift = Int32Constant(i * kBitsPerByte);
    Node* value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}
428 :
// Loads the 16-bit unsigned operand at |operand_index|, falling back to a
// byte-wise read on targets without unaligned access.
Node* InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16(),
                                        needs_poisoning);
  }
}

// Signed 16-bit counterpart of BytecodeOperandUnsignedShort.
Node* InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16(),
                                        needs_poisoning);
  }
}
464 :
// Loads the 32-bit unsigned operand at |operand_index|, falling back to a
// byte-wise read on targets without unaligned access.
Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32(),
                                        needs_poisoning);
  }
}

// Signed 32-bit counterpart of BytecodeOperandUnsignedQuad.
Node* InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32(),
                                        needs_poisoning);
  }
}
498 :
// Dispatches to the signed byte/short/quad loader matching |operand_size|.
Node* InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

// Dispatches to the unsigned byte/short/quad loader matching |operand_size|.
Node* InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}
534 :
// Decodes the unsigned kRegCount operand at |operand_index|.
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

// Decodes the single-byte kFlag8 operand at |operand_index|.
Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

// Decodes the kUImm operand at |operand_index| as an unsigned word32.
Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}
559 :
// kUImm operand zero-extended to a machine word.
Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}

// kUImm operand converted to a Smi.
Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandUImm(operand_index));
}

// Decodes the signed kImm operand at |operand_index| as word32.
Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

// kImm operand sign-extended to a machine word.
Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

// kImm operand converted to a Smi.
Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}
583 :
// Decodes the kIdx operand at |operand_index| as an unsigned word32.
Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

// kIdx operand zero-extended to a machine word.
Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}

// kIdx operand converted to a Smi.
Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(BytecodeOperandIdx(operand_index));
}
599 :
// Decodes a kIdx operand used to index the constant pool, widened to a
// machine word. The caller supplies the speculation-poisoning mode.
Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}

// Decodes a register operand (signed, since register indexes are negative)
// and sign-extends it to a machine word.
Node* InterpreterAssembler::BytecodeOperandReg(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}
619 :
// Decodes the 16-bit kRuntimeId operand at |operand_index|.
Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

// Decodes the kNativeContextIndex operand, zero-extended to a word.
Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

// Decodes the single-byte kIntrinsicId operand at |operand_index|.
Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}
647 :
// Loads entry |index| from the bytecode array's constant pool. Marked as a
// critical load for speculation poisoning.
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
  TNode<FixedArray> constant_pool = CAST(LoadObjectField(
      BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
  return LoadFixedArrayElement(constant_pool, UncheckedCast<IntPtrT>(index),
                               LoadSensitivity::kCritical);
}

// Loads constant pool entry |index| and untags it as a Smi.
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
  return SmiUntag(LoadConstantPoolEntry(index));
}

// Loads the constant pool entry indexed by the kIdx operand at
// |operand_index|.
Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  Node* index =
      BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
  return LoadConstantPoolEntry(index);
}

// As above, but untags the loaded Smi.
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
    int operand_index) {
  return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
}
670 :
// Loads the feedback vector of the closure currently being interpreted.
TNode<FeedbackVector> InterpreterAssembler::LoadFeedbackVector() {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  return CodeStubAssembler::LoadFeedbackVector(function);
}

// Unchecked variant: delegates to
// CodeStubAssembler::LoadFeedbackVectorUnchecked for the current closure.
Node* InterpreterAssembler::LoadFeedbackVectorUnchecked() {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  return CodeStubAssembler::LoadFeedbackVectorUnchecked(function);
}
680 :
// Invoked before every call generated by this assembler: spills the
// bytecode offset (unless this bytecode already saved it in its handler
// prologue), invalidates the cached bytecode array, and records that a call
// was made. With --debug-code it also snapshots the stack pointer so
// CallEpilogue() can verify it is unchanged across the call.
void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been called
    // in a path that dominates _all_ of those calls (which we don't track).
    SaveBytecodeOffset();
  }

  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    DCHECK_NULL(stack_pointer_before_call_);
    stack_pointer_before_call_ = LoadStackPointer();
  }
  bytecode_array_valid_ = false;
  made_call_ = true;
}
698 :
// Invoked after every generated call. With --debug-code (and unless the
// check is explicitly disabled) it aborts if the stack pointer differs from
// the value recorded in CallPrologue().
void InterpreterAssembler::CallEpilogue() {
  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    Node* stack_pointer_after_call = LoadStackPointer();
    Node* stack_pointer_before_call = stack_pointer_before_call_;
    stack_pointer_before_call_ = nullptr;
    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
                        AbortReason::kUnexpectedStackPointer);
  }
}
708 :
// Increments the call count stored in feedback slot |slot_id| of
// |feedback_vector| by one, accounting for the flag bits kept in the low
// bits of the count Smi.
void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
                                              Node* slot_id) {
  Comment("increment call count");
  TNode<Smi> call_count =
      CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize));
  // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
  // count are used as flags. To increment the call count by 1 we hence
  // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
  Node* new_count = SmiAdd(
      call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
  // Count is Smi, so we don't need a write barrier.
  StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
                          SKIP_WRITE_BARRIER, kTaggedSize);
}
723 :
 : // Records type feedback for a call {target} into {feedback_vector} at
 : // {slot_id}. State transitions implemented below:
 : //   monomorphic (weak ref to target) -- done, nothing to update;
 : //   megamorphic sentinel             -- done, stays megamorphic;
 : //   uninitialized / cleared weak ref -- try to go monomorphic;
 : //   anything else                    -- transition to megamorphic.
 : // A target only becomes monomorphic feedback if it is a JSFunction (or a
 : // JSBoundFunction chain ending in one) from the current native context.
724 1848 : void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
725 : Node* feedback_vector,
726 : Node* slot_id) {
727 3696 : Label extra_checks(this, Label::kDeferred), done(this);
728 :
729 : // Check if we have monomorphic {target} feedback already.
730 : TNode<MaybeObject> feedback =
731 1848 : LoadFeedbackVectorSlot(feedback_vector, slot_id);
732 1848 : Comment("check if monomorphic");
733 1848 : TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target));
734 1848 : GotoIf(is_monomorphic, &done);
735 :
736 : // Check if it is a megamorphic {target}.
737 1848 : Comment("check if megamorphic");
738 : Node* is_megamorphic = WordEqual(
739 1848 : feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
740 1848 : Branch(is_megamorphic, &done, &extra_checks);
741 :
742 1848 : BIND(&extra_checks);
743 : {
744 1848 : Label initialize(this), mark_megamorphic(this);
745 :
746 1848 : Comment("check if weak reference");
747 : Node* is_uninitialized = WordEqual(
748 : feedback,
749 1848 : HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
750 1848 : GotoIf(is_uninitialized, &initialize);
751 : CSA_ASSERT(this, IsWeakOrCleared(feedback));
752 :
753 : // If the weak reference is cleared, we have a new chance to become
754 : // monomorphic.
755 1848 : Comment("check if weak reference is cleared");
756 3696 : Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
757 :
758 1848 : BIND(&initialize);
759 : {
760 : // Check if {target} is a JSFunction in the current native context.
761 1848 : Comment("check if function in same native context");
762 3696 : GotoIf(TaggedIsSmi(target), &mark_megamorphic);
763 : // Check if the {target} is a JSFunction or JSBoundFunction
764 : // in the current native context.
 : // Loop unwraps JSBoundFunction chains via [[BoundTargetFunction]]
 : // until a plain JSFunction (or non-function) is found.
765 1848 : VARIABLE(var_current, MachineRepresentation::kTagged, target);
766 1848 : Label loop(this, &var_current), done_loop(this);
767 1848 : Goto(&loop);
768 1848 : BIND(&loop);
769 : {
770 1848 : Label if_boundfunction(this), if_function(this);
771 1848 : Node* current = var_current.value();
772 : CSA_ASSERT(this, TaggedIsNotSmi(current));
773 3696 : Node* current_instance_type = LoadInstanceType(current);
774 1848 : GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
775 3696 : &if_boundfunction);
776 1848 : Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
777 3696 : &if_function, &mark_megamorphic);
778 :
779 1848 : BIND(&if_function);
780 : {
781 : // Check that the JSFunction {current} is in the current native
782 : // context.
783 : Node* current_context =
784 : LoadObjectField(current, JSFunction::kContextOffset);
785 3696 : Node* current_native_context = LoadNativeContext(current_context);
786 3696 : Branch(WordEqual(LoadNativeContext(context), current_native_context),
787 1848 : &done_loop, &mark_megamorphic);
788 : }
789 :
790 1848 : BIND(&if_boundfunction);
791 : {
792 : // Continue with the [[BoundTargetFunction]] of {target}.
793 : var_current.Bind(LoadObjectField(
794 1848 : current, JSBoundFunction::kBoundTargetFunctionOffset));
795 1848 : Goto(&loop);
796 1848 : }
797 : }
798 1848 : BIND(&done_loop);
 : // Note: the weak reference stored is the original {target}, not the
 : // unwrapped bound-target function.
799 : StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
800 1848 : CAST(target));
801 1848 : ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
802 3696 : Goto(&done);
803 : }
804 :
805 1848 : BIND(&mark_megamorphic);
806 : {
807 : // MegamorphicSentinel is an immortal immovable object so
808 : // write-barrier is not needed.
809 1848 : Comment("transition to megamorphic");
810 : DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
811 : StoreFeedbackVectorSlot(
812 : feedback_vector, slot_id,
813 1848 : HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
814 1848 : SKIP_WRITE_BARRIER);
815 : ReportFeedbackUpdate(feedback_vector, slot_id,
816 1848 : "Call:TransitionMegamorphic");
817 1848 : Goto(&done);
818 1848 : }
819 : }
820 :
821 3696 : BIND(&done);
822 1848 : }
823 :
 : // Updates call feedback for {target}: bumps the call count and records
 : // callable feedback. A no-op when {maybe_feedback_vector} is undefined
 : // (i.e. no feedback vector has been allocated for this function).
824 1680 : void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
825 : Node* maybe_feedback_vector,
826 : Node* slot_id) {
827 1680 : Label feedback_done(this);
828 : // If feedback_vector is not valid, then nothing to do.
829 3360 : GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
830 :
831 : CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
832 :
833 : // Increment the call count.
834 1680 : IncrementCallCount(maybe_feedback_vector, slot_id);
835 :
836 : // Collect the callable {target} feedback.
837 1680 : CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id);
838 1680 : Goto(&feedback_done);
839 :
840 1680 : BIND(&feedback_done);
841 1680 : }
842 :
 : // Pushes {args} and tail-calls the InterpreterPushArgsThenCall builtin for
 : // {function}, then dispatches to the next bytecode. With an implicit
 : // (kNullOrUndefined) receiver the register list contains no receiver, so
 : // reg_count() is already the argument count; otherwise the receiver is in
 : // the list and must be subtracted out.
843 1008 : void InterpreterAssembler::CallJSAndDispatch(
844 2016 : Node* function, Node* context, const RegListNodePair& args,
845 : ConvertReceiverMode receiver_mode) {
846 : DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
847 : DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
848 : bytecode_ == Bytecode::kInvokeIntrinsic);
849 : DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
850 :
851 : Node* args_count;
852 1008 : if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
853 : // The receiver is implied, so it is not in the argument list.
854 : args_count = args.reg_count();
855 : } else {
856 : // Subtract the receiver from the argument count.
857 1344 : Node* receiver_count = Int32Constant(1);
858 1344 : args_count = Int32Sub(args.reg_count(), receiver_count);
859 : }
860 :
861 : Callable callable = CodeFactory::InterpreterPushArgsThenCall(
862 1008 : isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
863 : Node* code_target = HeapConstant(callable.code());
864 :
865 : TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
866 : args_count, args.base_reg_location(),
867 1008 : function);
868 : // TailCallStubThenDispatch updates accumulator with result.
869 2016 : accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
870 1008 : }
871 :
 : // Variadic overload: calls {function} with the explicitly listed {args}
 : // (rather than a register list) via the generic Call builtin, then
 : // dispatches. For an implicit receiver, undefined is injected as the
 : // first argument before {args...}.
872 : template <class... TArgs>
873 1008 : void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
874 : Node* arg_count,
875 : ConvertReceiverMode receiver_mode,
876 : TArgs... args) {
877 : DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
878 : DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
879 : bytecode_ == Bytecode::kInvokeIntrinsic);
880 : DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
881 1008 : Callable callable = CodeFactory::Call(isolate());
882 : Node* code_target = HeapConstant(callable.code());
883 :
884 1008 : if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
885 : // The first argument parameter (the receiver) is implied to be undefined.
886 1008 : TailCallStubThenBytecodeDispatch(
887 : callable.descriptor(), code_target, context, function, arg_count,
888 504 : static_cast<Node*>(UndefinedConstant()), args...);
889 : } else {
890 504 : TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
891 : context, function, arg_count, args...);
892 : }
893 : // TailCallStubThenDispatch updates accumulator with result.
894 2016 : accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
895 1008 : }
896 :
897 : // Instantiate CallJSAndDispatch() for argument counts used by interpreter
898 : // generator.
 : // Explicit instantiations for 0 through 3 extra Node* arguments; keep in
 : // sync with the arities emitted by the bytecode generators.
899 : template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
900 : Node* function, Node* context, Node* arg_count,
901 : ConvertReceiverMode receiver_mode);
902 : template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
903 : Node* function, Node* context, Node* arg_count,
904 : ConvertReceiverMode receiver_mode, Node*);
905 : template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
906 : Node* function, Node* context, Node* arg_count,
907 : ConvertReceiverMode receiver_mode, Node*, Node*);
908 : template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
909 : Node* function, Node* context, Node* arg_count,
910 : ConvertReceiverMode receiver_mode, Node*, Node*, Node*);
911 :
 : // Like CallJSAndDispatch, but for calls whose final argument is a spread:
 : // collects call feedback first, then tail-calls the
 : // InterpreterPushArgsThenCall builtin in kWithFinalSpread mode. The
 : // receiver is always present in {args} (kAny mode) and is subtracted out.
912 168 : void InterpreterAssembler::CallJSWithSpreadAndDispatch(
913 336 : Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
914 : Node* maybe_feedback_vector) {
915 : DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
916 : DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
917 168 : CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
918 168 : Comment("call using CallWithSpread builtin");
919 : Callable callable = CodeFactory::InterpreterPushArgsThenCall(
920 : isolate(), ConvertReceiverMode::kAny,
921 168 : InterpreterPushArgsMode::kWithFinalSpread);
922 : Node* code_target = HeapConstant(callable.code());
923 :
924 336 : Node* receiver_count = Int32Constant(1);
925 336 : Node* args_count = Int32Sub(args.reg_count(), receiver_count);
926 : TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
927 : args_count, args.base_reg_location(),
928 168 : function);
929 : // TailCallStubThenDispatch updates accumulator with result.
930 336 : accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
931 168 : }
932 :
 : // Implements the Construct bytecode: collects construct feedback for
 : // {new_target} (monomorphic weak ref, AllocationSite for the Array
 : // constructor, or megamorphic), then constructs via the appropriate
 : // InterpreterPushArgsThenConstruct builtin. Returns the construction
 : // result. Feedback collection is skipped when {feedback_vector} is
 : // undefined.
933 168 : Node* InterpreterAssembler::Construct(Node* target, Node* context,
934 : Node* new_target,
935 336 : const RegListNodePair& args,
936 : Node* slot_id, Node* feedback_vector) {
937 : DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
938 168 : VARIABLE(var_result, MachineRepresentation::kTagged);
939 336 : VARIABLE(var_site, MachineRepresentation::kTagged);
940 168 : Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
941 168 : construct(this), construct_array(this, &var_site);
942 336 : GotoIf(IsUndefined(feedback_vector), &construct);
943 :
944 : // Increment the call count.
945 168 : IncrementCallCount(feedback_vector, slot_id);
946 :
947 : // Check if we have monomorphic {new_target} feedback already.
948 : TNode<MaybeObject> feedback =
949 168 : LoadFeedbackVectorSlot(feedback_vector, slot_id);
950 : Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
951 336 : &extra_checks);
952 :
953 168 : BIND(&extra_checks);
954 : {
955 168 : Label check_allocation_site(this), check_initialized(this),
956 168 : initialize(this), mark_megamorphic(this);
957 :
958 : // Check if it is a megamorphic {new_target}..
959 168 : Comment("check if megamorphic");
960 : Node* is_megamorphic = WordEqual(
961 168 : feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
962 168 : GotoIf(is_megamorphic, &construct);
963 :
964 168 : Comment("check if weak reference");
965 336 : GotoIfNot(IsWeakOrCleared(feedback), &check_allocation_site);
966 :
967 : // If the weak reference is cleared, we have a new chance to become
968 : // monomorphic.
969 168 : Comment("check if weak reference is cleared");
970 336 : Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
971 :
972 168 : BIND(&check_allocation_site);
973 : {
974 : // Check if it is an AllocationSite.
975 168 : Comment("check if allocation site");
976 : TNode<HeapObject> strong_feedback = CAST(feedback);
977 336 : GotoIfNot(IsAllocationSite(strong_feedback), &check_initialized);
978 :
979 : // Make sure that {target} and {new_target} are the Array constructor.
980 168 : Node* array_function = LoadContextElement(LoadNativeContext(context),
981 504 : Context::ARRAY_FUNCTION_INDEX);
982 336 : GotoIfNot(WordEqual(target, array_function), &mark_megamorphic);
983 336 : GotoIfNot(WordEqual(new_target, array_function), &mark_megamorphic);
984 168 : var_site.Bind(strong_feedback);
985 168 : Goto(&construct_array);
986 : }
987 :
988 168 : BIND(&check_initialized);
989 : {
990 : // Check if it is uninitialized.
991 168 : Comment("check if uninitialized");
992 : Node* is_uninitialized =
993 168 : WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
994 168 : Branch(is_uninitialized, &initialize, &mark_megamorphic);
995 : }
996 :
997 168 : BIND(&initialize);
998 : {
999 168 : Comment("check if function in same native context");
1000 336 : GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
1001 : // Check if the {new_target} is a JSFunction or JSBoundFunction
1002 : // in the current native context.
 : // Loop unwraps JSBoundFunction chains via [[BoundTargetFunction]].
1003 168 : VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
1004 168 : Label loop(this, &var_current), done_loop(this);
1005 168 : Goto(&loop);
1006 168 : BIND(&loop);
1007 : {
1008 168 : Label if_boundfunction(this), if_function(this);
1009 168 : Node* current = var_current.value();
1010 : CSA_ASSERT(this, TaggedIsNotSmi(current));
1011 336 : Node* current_instance_type = LoadInstanceType(current);
1012 168 : GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
1013 336 : &if_boundfunction);
1014 168 : Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
1015 336 : &if_function, &mark_megamorphic);
1016 :
1017 168 : BIND(&if_function);
1018 : {
1019 : // Check that the JSFunction {current} is in the current native
1020 : // context.
1021 : Node* current_context =
1022 : LoadObjectField(current, JSFunction::kContextOffset);
1023 336 : Node* current_native_context = LoadNativeContext(current_context);
1024 336 : Branch(WordEqual(LoadNativeContext(context), current_native_context),
1025 168 : &done_loop, &mark_megamorphic);
1026 : }
1027 :
1028 168 : BIND(&if_boundfunction);
1029 : {
1030 : // Continue with the [[BoundTargetFunction]] of {current}.
1031 : var_current.Bind(LoadObjectField(
1032 168 : current, JSBoundFunction::kBoundTargetFunctionOffset));
1033 168 : Goto(&loop);
1034 168 : }
1035 : }
1036 168 : BIND(&done_loop);
1037 :
1038 : // Create an AllocationSite if {target} and {new_target} refer
1039 : // to the current native context's Array constructor.
1040 168 : Label create_allocation_site(this), store_weak_reference(this);
1041 336 : GotoIfNot(WordEqual(target, new_target), &store_weak_reference);
1042 168 : Node* array_function = LoadContextElement(LoadNativeContext(context),
1043 504 : Context::ARRAY_FUNCTION_INDEX);
1044 168 : Branch(WordEqual(target, array_function), &create_allocation_site,
1045 336 : &store_weak_reference);
1046 :
1047 168 : BIND(&create_allocation_site);
1048 : {
1049 : var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector,
1050 504 : SmiTag(slot_id)));
1051 : ReportFeedbackUpdate(feedback_vector, slot_id,
1052 168 : "Construct:CreateAllocationSite");
1053 168 : Goto(&construct_array);
1054 : }
1055 :
1056 168 : BIND(&store_weak_reference);
1057 : {
1058 : StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
1059 168 : CAST(new_target));
1060 : ReportFeedbackUpdate(feedback_vector, slot_id,
1061 168 : "Construct:StoreWeakReference");
1062 168 : Goto(&construct);
1063 168 : }
1064 : }
1065 :
1066 168 : BIND(&mark_megamorphic);
1067 : {
1068 : // MegamorphicSentinel is an immortal immovable object so
1069 : // write-barrier is not needed.
1070 168 : Comment("transition to megamorphic");
1071 : DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
1072 : StoreFeedbackVectorSlot(
1073 : feedback_vector, slot_id,
1074 168 : HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
1075 168 : SKIP_WRITE_BARRIER);
1076 : ReportFeedbackUpdate(feedback_vector, slot_id,
1077 168 : "Construct:TransitionMegamorphic");
1078 168 : Goto(&construct);
1079 168 : }
1080 : }
1081 :
1082 168 : BIND(&construct_array);
1083 : {
1084 : // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
1085 : // constructor feedback collection inside of Ignition.
 : // Array-constructor path: pass the AllocationSite in {var_site} so the
 : // builtin can track element kinds.
1086 168 : Comment("call using ConstructArray builtin");
1087 : Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1088 168 : isolate(), InterpreterPushArgsMode::kArrayFunction);
1089 : Node* code_target = HeapConstant(callable.code());
1090 : var_result.Bind(CallStub(callable.descriptor(), code_target, context,
1091 : args.reg_count(), args.base_reg_location(), target,
1092 672 : new_target, var_site.value()));
1093 168 : Goto(&return_result);
1094 : }
1095 :
1096 168 : BIND(&construct);
1097 : {
1098 : // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
 : // Generic path: no AllocationSite, undefined is passed instead.
1099 168 : Comment("call using Construct builtin");
1100 : Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1101 168 : isolate(), InterpreterPushArgsMode::kOther);
1102 : Node* code_target = HeapConstant(callable.code());
1103 : var_result.Bind(CallStub(callable.descriptor(), code_target, context,
1104 : args.reg_count(), args.base_reg_location(), target,
1105 672 : new_target, UndefinedConstant()));
1106 168 : Goto(&return_result);
1107 : }
1108 :
1109 168 : BIND(&return_result);
1110 336 : return var_result.value();
1111 : }
1112 :
 : // Implements ConstructWithSpread: same feedback-state machine as
 : // Construct() but without the AllocationSite / Array-constructor fast
 : // path, and constructs via the kWithFinalSpread builtin. Returns the
 : // construction result.
1113 168 : Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
1114 : Node* new_target,
1115 168 : const RegListNodePair& args,
1116 : Node* slot_id,
1117 : Node* feedback_vector) {
1118 : // TODO(bmeurer): Unify this with the Construct bytecode feedback
1119 : // above once we have a way to pass the AllocationSite to the Array
1120 : // constructor _and_ spread the last argument at the same time.
1121 : DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
1122 336 : Label extra_checks(this, Label::kDeferred), construct(this);
1123 336 : GotoIf(IsUndefined(feedback_vector), &construct);
1124 :
1125 : // Increment the call count.
1126 168 : IncrementCallCount(feedback_vector, slot_id);
1127 :
1128 : // Check if we have monomorphic {new_target} feedback already.
1129 : TNode<MaybeObject> feedback =
1130 168 : LoadFeedbackVectorSlot(feedback_vector, slot_id);
1131 : Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
1132 336 : &extra_checks);
1133 :
1134 168 : BIND(&extra_checks);
1135 : {
1136 168 : Label check_initialized(this), initialize(this), mark_megamorphic(this);
1137 :
1138 : // Check if it is a megamorphic {new_target}.
1139 168 : Comment("check if megamorphic");
1140 : Node* is_megamorphic = WordEqual(
1141 168 : feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
1142 168 : GotoIf(is_megamorphic, &construct);
1143 :
1144 168 : Comment("check if weak reference");
1145 336 : GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);
1146 :
1147 : // If the weak reference is cleared, we have a new chance to become
1148 : // monomorphic.
1149 168 : Comment("check if weak reference is cleared");
1150 336 : Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
1151 :
1152 168 : BIND(&check_initialized);
1153 : {
1154 : // Check if it is uninitialized.
1155 168 : Comment("check if uninitialized");
1156 : Node* is_uninitialized =
1157 168 : WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
1158 168 : Branch(is_uninitialized, &initialize, &mark_megamorphic);
1159 : }
1160 :
1161 168 : BIND(&initialize);
1162 : {
1163 168 : Comment("check if function in same native context");
1164 336 : GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
1165 : // Check if the {new_target} is a JSFunction or JSBoundFunction
1166 : // in the current native context.
 : // Loop unwraps JSBoundFunction chains via [[BoundTargetFunction]].
1167 168 : VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
1168 168 : Label loop(this, &var_current), done_loop(this);
1169 168 : Goto(&loop);
1170 168 : BIND(&loop);
1171 : {
1172 168 : Label if_boundfunction(this), if_function(this);
1173 168 : Node* current = var_current.value();
1174 : CSA_ASSERT(this, TaggedIsNotSmi(current));
1175 336 : Node* current_instance_type = LoadInstanceType(current);
1176 168 : GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
1177 336 : &if_boundfunction);
1178 168 : Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
1179 336 : &if_function, &mark_megamorphic);
1180 :
1181 168 : BIND(&if_function);
1182 : {
1183 : // Check that the JSFunction {current} is in the current native
1184 : // context.
1185 : Node* current_context =
1186 : LoadObjectField(current, JSFunction::kContextOffset);
1187 336 : Node* current_native_context = LoadNativeContext(current_context);
1188 336 : Branch(WordEqual(LoadNativeContext(context), current_native_context),
1189 168 : &done_loop, &mark_megamorphic);
1190 : }
1191 :
1192 168 : BIND(&if_boundfunction);
1193 : {
1194 : // Continue with the [[BoundTargetFunction]] of {current}.
1195 : var_current.Bind(LoadObjectField(
1196 168 : current, JSBoundFunction::kBoundTargetFunctionOffset));
1197 168 : Goto(&loop);
1198 168 : }
1199 : }
1200 168 : BIND(&done_loop);
1201 : StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
1202 168 : CAST(new_target));
1203 : ReportFeedbackUpdate(feedback_vector, slot_id,
1204 168 : "ConstructWithSpread:Initialize");
1205 336 : Goto(&construct);
1206 : }
1207 :
1208 168 : BIND(&mark_megamorphic);
1209 : {
1210 : // MegamorphicSentinel is an immortal immovable object so
1211 : // write-barrier is not needed.
1212 168 : Comment("transition to megamorphic");
1213 : DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
1214 : StoreFeedbackVectorSlot(
1215 : feedback_vector, slot_id,
1216 168 : HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
1217 168 : SKIP_WRITE_BARRIER);
1218 : ReportFeedbackUpdate(feedback_vector, slot_id,
1219 168 : "ConstructWithSpread:TransitionMegamorphic");
1220 168 : Goto(&construct);
1221 168 : }
1222 : }
1223 :
1224 168 : BIND(&construct);
1225 168 : Comment("call using ConstructWithSpread builtin");
1226 : Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1227 168 : isolate(), InterpreterPushArgsMode::kWithFinalSpread);
1228 : Node* code_target = HeapConstant(callable.code());
1229 : return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
1230 : args.base_reg_location(), target, new_target,
1231 840 : UndefinedConstant());
1232 : }
1233 :
 : // Calls the runtime function identified by the (dynamic) {function_id}
 : // with the arguments in {args}, going through the InterpreterCEntry stub.
 : // The C entry point is looked up from the runtime function table:
 : //   entry = table[function_id].entry
 : // computed with manual offset arithmetic below. Returns the call result
 : // ({result_size} determines how many return values the stub handles).
1234 342 : Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
1235 342 : const RegListNodePair& args,
1236 : int result_size) {
1237 : DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
1238 : DCHECK(Bytecodes::IsCallRuntime(bytecode_));
1239 342 : Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
1240 : Node* code_target = HeapConstant(callable.code());
1241 :
1242 : // Get the function entry from the function id.
1243 : Node* function_table = ExternalConstant(
1244 684 : ExternalReference::runtime_function_table_address(isolate()));
1245 : Node* function_offset =
1246 1026 : Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
1247 : Node* function =
1248 1026 : IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
1249 : Node* function_entry =
1250 : Load(MachineType::Pointer(), function,
1251 684 : IntPtrConstant(offsetof(Runtime::Function, entry)));
1252 :
1253 : return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
1254 : result_size, code_target, context, args.reg_count(),
1255 684 : args.base_reg_location(), function_entry);
1256 : }
1257 :
 : // Adjusts the interrupt budget stored in the BytecodeArray header by
 : // {weight} (always non-negative; direction is given by {backward}).
 : // Forward updates only add budget and never trigger an interrupt check;
 : // backward updates (loop back-edges, returns) subtract, and when the
 : // budget underflows zero, call Runtime::kInterrupt and reset the budget.
1258 4256 : void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
1259 4256 : Comment("[ UpdateInterruptBudget");
1260 :
1261 : Node* budget_offset =
1262 8512 : IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
1263 :
1264 : // Assert that the weight is positive (negative weights should be implemented
1265 : // as backward updates).
1266 : CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));
1267 :
1268 : // Update budget by |weight| and check if it reaches zero.
1269 4256 : Variable new_budget(this, MachineRepresentation::kWord32);
1270 : Node* old_budget =
1271 4256 : Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
1272 : // Make sure we include the current bytecode in the budget calculation.
1273 : Node* budget_after_bytecode =
1274 12768 : Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));
1275 :
1276 4256 : if (backward) {
1277 1120 : new_budget.Bind(Int32Sub(budget_after_bytecode, weight));
1278 :
1279 : Node* condition =
1280 2240 : Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
1281 560 : Label ok(this), interrupt_check(this, Label::kDeferred);
1282 560 : Branch(condition, &ok, &interrupt_check);
1283 :
1284 : // Perform interrupt and reset budget.
1285 560 : BIND(&interrupt_check);
1286 : {
1287 : CallRuntime(Runtime::kInterrupt, GetContext());
1288 1120 : new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
1289 560 : Goto(&ok);
1290 : }
1291 :
1292 1120 : BIND(&ok);
1293 : } else {
1294 : // For a forward jump, we know we only increase the interrupt budget, so
1295 : // no need to check if it's below zero.
1296 7392 : new_budget.Bind(Int32Add(budget_after_bytecode, weight));
1297 : }
1298 :
1299 : // Update budget.
1300 : StoreNoWriteBarrier(MachineRepresentation::kWord32,
1301 : BytecodeArrayTaggedPointer(), budget_offset,
1302 4256 : new_budget.value());
1303 4256 : Comment("] UpdateInterruptBudget");
1304 4256 : }
1305 :
1306 41328 : Node* InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); }
1307 :
 : // Advances the bytecode offset by a compile-time constant {delta}.
1308 41440 : Node* InterpreterAssembler::Advance(int delta) {
1309 82880 : return Advance(IntPtrConstant(delta));
1310 : }
1311 :
 : // Moves the bytecode offset by {delta} (subtracting when {backward}),
 : // rebinds bytecode_offset_, and returns the new offset.
1312 45472 : Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
1313 : #ifdef V8_TRACE_IGNITION
1314 : TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
1315 : #endif
1316 46480 : Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
1317 226688 : : IntPtrAdd(BytecodeOffset(), delta);
1318 45472 : bytecode_offset_.Bind(next_offset);
1319 45472 : return next_offset;
1320 : }
1321 :
 : // Performs an unconditional jump by {delta}: charges the interrupt budget
 : // (possibly triggering an interrupt for backward jumps), advances the
 : // offset, and dispatches to the target bytecode's handler.
1322 4032 : Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
1323 : DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
1324 :
1325 12096 : UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
1326 4032 : Node* new_bytecode_offset = Advance(delta, backward);
1327 4032 : Node* target_bytecode = LoadBytecode(new_bytecode_offset);
1328 4032 : return DispatchToBytecode(target_bytecode, new_bytecode_offset);
1329 : }
1330 :
1331 3696 : Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }
1332 :
 : // Backward jump by {delta} (loop back-edge; subject to interrupt check).
1333 336 : Node* InterpreterAssembler::JumpBackward(Node* delta) {
1334 336 : return Jump(delta, true);
1335 : }
1336 :
 : // Jumps forward by {delta} when {condition} holds, otherwise falls
 : // through and dispatches to the next bytecode.
1337 2016 : void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
1338 4032 : Label match(this), no_match(this);
1339 :
1340 2016 : Branch(condition, &match, &no_match);
1341 2016 : BIND(&match);
1342 : Jump(delta);
1343 2016 : BIND(&no_match);
1344 4032 : Dispatch();
1345 2016 : }
1346 :
 : // Conditional jump taken when {lhs} == {rhs} (word comparison).
1347 1344 : void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
1348 2688 : JumpConditional(WordEqual(lhs, rhs), delta);
1349 1344 : }
1350 :
 : // Conditional jump taken when {lhs} != {rhs} (word comparison).
1351 672 : void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
1352 : Node* delta) {
1353 1344 : JumpConditional(WordNotEqual(lhs, rhs), delta);
1354 672 : }
1355 :
 : // Loads the bytecode byte at {bytecode_offset} and widens it to a word.
1356 45472 : Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
1357 : Node* bytecode =
1358 45472 : Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
1359 90944 : return ChangeUint32ToWord(bytecode);
1360 : }
1361 :
 : // Peephole for dispatch: when the next bytecode is Star, execute it inline
 : // (see InlineStar) and return the bytecode after it; otherwise return
 : // {target_bytecode} unchanged.
1362 2296 : Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
1363 4592 : Label do_inline_star(this), done(this);
1364 :
1365 4592 : Variable var_bytecode(this, MachineType::PointerRepresentation());
1366 2296 : var_bytecode.Bind(target_bytecode);
1367 :
1368 4592 : Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
1369 4592 : Node* is_star = WordEqual(target_bytecode, star_bytecode);
1370 2296 : Branch(is_star, &do_inline_star, &done);
1371 :
1372 2296 : BIND(&do_inline_star);
1373 : {
1374 2296 : InlineStar();
1375 2296 : var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
1376 2296 : Goto(&done);
1377 : }
1378 2296 : BIND(&done);
1379 4592 : return var_bytecode.value();
1380 : }
1381 :
 : // Executes a Star bytecode inline (store accumulator to its register
 : // operand and advance), temporarily switching bytecode_ / accumulator_use_
 : // to kStar so operand decoding and the accumulator-use bookkeeping match,
 : // then restoring the caller's state.
1382 2296 : void InterpreterAssembler::InlineStar() {
1383 2296 : Bytecode previous_bytecode = bytecode_;
1384 2296 : AccumulatorUse previous_acc_use = accumulator_use_;
1385 :
1386 2296 : bytecode_ = Bytecode::kStar;
1387 2296 : accumulator_use_ = AccumulatorUse::kNone;
1388 :
1389 : #ifdef V8_TRACE_IGNITION
1390 : TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
1391 : #endif
1392 : StoreRegister(GetAccumulator(),
1393 2296 : BytecodeOperandReg(0, LoadSensitivity::kSafe));
1394 :
1395 : DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
1396 :
1397 : Advance();
1398 2296 : bytecode_ = previous_bytecode;
1399 2296 : accumulator_use_ = previous_acc_use;
1400 2296 : }
1401 :
 : // Advances past the current bytecode and tail-dispatches to the handler of
 : // the next one, applying the Star-lookahead peephole where the current
 : // bytecode participates in it.
1402 39032 : Node* InterpreterAssembler::Dispatch() {
1403 39032 : Comment("========= Dispatch");
1404 : DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1405 : Node* target_offset = Advance();
1406 39032 : Node* target_bytecode = LoadBytecode(target_offset);
1407 :
1408 39032 : if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
1409 2296 : target_bytecode = StarDispatchLookahead(target_bytecode);
1410 : }
 : // BytecodeOffset() (not target_offset) is used here because the lookahead
 : // above may have advanced the offset further.
1411 39032 : return DispatchToBytecode(target_bytecode, BytecodeOffset());
1412 : }
1413 :
 : // Loads the handler entry point for {target_bytecode} from the dispatch
 : // table and tail-calls it at {new_bytecode_offset}.
1414 44240 : Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
1415 : Node* new_bytecode_offset) {
1416 44240 : if (FLAG_trace_ignition_dispatches) {
1417 0 : TraceBytecodeDispatch(target_bytecode);
1418 : }
1419 :
1420 : Node* target_code_entry =
1421 : Load(MachineType::Pointer(), DispatchTableRawPointer(),
1422 88480 : TimesSystemPointerSize(target_bytecode));
1423 :
1424 : return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
1425 44240 : target_bytecode);
1426 : }
1427 :
 : // Dispatches to a handler given as a Code object: computes the raw entry
 : // address (untagged Code pointer + header size) and tail-calls it.
1428 0 : Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
1429 : Node* bytecode_offset,
1430 : Node* target_bytecode) {
1431 : // TODO(ishell): Add CSA::CodeEntryPoint(code).
1432 : Node* handler_entry =
1433 : IntPtrAdd(BitcastTaggedToWord(handler),
1434 0 : IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
1435 : return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
1436 0 : target_bytecode);
1437 : }
1438 :
 : // Common dispatch tail: poisons the handler entry against speculation and
 : // tail-calls it with the standard interpreter dispatch arguments
 : // (accumulator, offset, bytecode array, dispatch table).
1439 44352 : Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
1440 : Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
1441 : // Propagate speculation poisoning.
1442 88704 : Node* poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
1443 : return TailCallBytecodeDispatch(
1444 : InterpreterDispatchDescriptor{}, poisoned_handler_entry,
1445 : GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(),
1446 88704 : DispatchTableRawPointer());
1447 : }
1448 :
 : // Dispatch for the Wide/ExtraWide prefix bytecodes: indexes the dispatch
 : // table at base 256 (kDouble) or 512 (kQuadruple) plus the bytecode that
 : // follows the prefix, and tail-calls that scaled handler.
1449 112 : void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
1450 : // Dispatching a wide bytecode requires treating the prefix
1451 : // bytecode a base pointer into the dispatch table and dispatching
1452 : // the bytecode that follows relative to this base.
1453 : //
1454 : // Indices 0-255 correspond to bytecodes with operand_scale == 0
1455 : // Indices 256-511 correspond to bytecodes with operand_scale == 1
1456 : // Indices 512-767 correspond to bytecodes with operand_scale == 2
1457 : DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1458 112 : Node* next_bytecode_offset = Advance(1);
1459 112 : Node* next_bytecode = LoadBytecode(next_bytecode_offset);
1460 :
1461 112 : if (FLAG_trace_ignition_dispatches) {
1462 0 : TraceBytecodeDispatch(next_bytecode);
1463 : }
1464 :
1465 : Node* base_index;
1466 112 : switch (operand_scale) {
1467 : case OperandScale::kDouble:
1468 112 : base_index = IntPtrConstant(1 << kBitsPerByte);
1469 56 : break;
1470 : case OperandScale::kQuadruple:
1471 112 : base_index = IntPtrConstant(2 << kBitsPerByte);
1472 56 : break;
1473 : default:
1474 0 : UNREACHABLE();
1475 : }
1476 224 : Node* target_index = IntPtrAdd(base_index, next_bytecode);
1477 : Node* target_code_entry =
1478 : Load(MachineType::Pointer(), DispatchTableRawPointer(),
1479 224 : TimesSystemPointerSize(target_index));
1480 :
1481 : DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
1482 112 : next_bytecode);
1483 112 : }
1484 :
 : // On function return, charges the interrupt budget as if a back-edge to
 : // the first bytecode had been taken: weight = current offset minus the
 : // offset of the first bytecode (header size correction).
1485 224 : void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
1486 : // TODO(rmcilroy): Investigate whether it is worth supporting self
1487 : // optimization of primitive functions like FullCodegen.
1488 :
1489 : // Update profiling count by the number of bytes between the end of the
1490 : // current bytecode and the start of the first one, to simulate backedge to
1491 : // start of function.
1492 : //
1493 : // With headers and current offset, the bytecode array layout looks like:
1494 : //
1495 : // <---------- simulated backedge ----------
1496 : // | header | first bytecode | .... | return bytecode |
1497 : // |<------ current offset ------->
1498 : // ^ tagged bytecode array pointer
1499 : //
1500 : // UpdateInterruptBudget already handles adding the bytecode size to the
1501 : // length of the back-edge, so we just have to correct for the non-zero offset
1502 : // of the first bytecode.
1503 :
1504 : const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
1505 672 : Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
1506 1120 : Int32Constant(kFirstBytecodeOffset));
1507 224 : UpdateInterruptBudget(profiling_weight, true);
1508 224 : }
1509 :
1510 168 : Node* InterpreterAssembler::LoadOSRNestingLevel() {
1511 : return LoadObjectField(BytecodeArrayTaggedPointer(),
1512 : BytecodeArray::kOSRNestingLevelOffset,
1513 336 : MachineType::Int8());
1514 : }
1515 :
1516 1960 : void InterpreterAssembler::Abort(AbortReason abort_reason) {
1517 1960 : disable_stack_check_across_call_ = true;
1518 1960 : Node* abort_id = SmiConstant(abort_reason);
1519 : CallRuntime(Runtime::kAbort, GetContext(), abort_id);
1520 1960 : disable_stack_check_across_call_ = false;
1521 1960 : }
1522 :
1523 0 : void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
1524 : AbortReason abort_reason) {
1525 0 : Label ok(this), abort(this, Label::kDeferred);
1526 0 : Branch(WordEqual(lhs, rhs), &ok, &abort);
1527 :
1528 0 : BIND(&abort);
1529 0 : Abort(abort_reason);
1530 0 : Goto(&ok);
1531 :
1532 0 : BIND(&ok);
1533 0 : }
1534 :
1535 1176 : void InterpreterAssembler::MaybeDropFrames(Node* context) {
1536 : Node* restart_fp_address =
1537 2352 : ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
1538 :
1539 1176 : Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
1540 2352 : Node* null = IntPtrConstant(0);
1541 :
1542 1176 : Label ok(this), drop_frames(this);
1543 2352 : Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
1544 :
1545 1176 : BIND(&drop_frames);
1546 : // We don't expect this call to return since the frame dropper tears down
1547 : // the stack and jumps into the function on the target frame to restart it.
1548 2352 : CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
1549 1176 : Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
1550 1176 : Goto(&ok);
1551 :
1552 2352 : BIND(&ok);
1553 1176 : }
1554 :
1555 0 : void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
1556 : CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
1557 0 : SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
1558 0 : }
1559 :
// Bumps the dispatch counter for the (current bytecode -> target bytecode)
// pair in the flat interpreter_dispatch_counters table, saturating at the
// maximum uintptr_t value instead of wrapping.
void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
  Node* counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  // The table is laid out as a square matrix indexed by
  // [source bytecode][target bytecode]; compute the start of this source
  // bytecode's row.
  Node* source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  Node* counter_offset = TimesSystemPointerSize(
      IntPtrAdd(source_bytecode_table_index, target_bytecode));
  Node* old_counter =
      Load(MachineType::IntPtr(), counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  // Leave the counter untouched once it has reached its maximum so it
  // saturates rather than overflowing back to zero.
  Node* counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    // The counters table lives outside the heap, so no write barrier is
    // needed.
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}
1587 :
// static
// Whether the target architecture tolerates unaligned loads/stores, which
// decides how multi-byte bytecode operands may be read.
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  // MIPS traps on unaligned accesses.
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
  return true;
#else
// Force a decision to be made for any newly added architecture.
#error "Unknown Architecture"
#endif
}
1599 :
1600 0 : void InterpreterAssembler::AbortIfRegisterCountInvalid(
1601 : Node* parameters_and_registers, Node* formal_parameter_count,
1602 : Node* register_count) {
1603 0 : Node* array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
1604 :
1605 0 : Label ok(this), abort(this, Label::kDeferred);
1606 : Branch(UintPtrLessThanOrEqual(
1607 0 : IntPtrAdd(formal_parameter_count, register_count), array_size),
1608 0 : &ok, &abort);
1609 :
1610 0 : BIND(&abort);
1611 0 : Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
1612 0 : Goto(&ok);
1613 :
1614 0 : BIND(&ok);
1615 0 : }
1616 :
// Copies the formal parameters (without the receiver) followed by the whole
// register file into {array}, the generator's parameters_and_registers
// storage. Returns {array}.
Node* InterpreterAssembler::ExportParametersAndRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  // Store the formal parameters (without receiver) followed by the
  // registers into the generator's internal parameters_and_registers field.
  TNode<IntPtrT> formal_parameter_count_intptr =
      ChangeInt32ToIntPtr(formal_parameter_count);
  Node* register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    // The register list is expected to start at register 0; verify that and
    // that the array is large enough for parameters + registers.
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  {
    Variable var_index(this, MachineType::PointerRepresentation());
    var_index.Bind(IntPtrConstant(0));

    // Iterate over parameters and write them into the array.
    Label loop(this, &var_index), done_loop(this);

    // Base operand such that parameter {index} lives at register operand
    // reg_base - index (parameters sit at negative operands below the frame
    // pointer; see Register::FromParameterIndex).
    Node* reg_base = IntPtrAdd(
        IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
        formal_parameter_count_intptr);

    Goto(&loop);
    BIND(&loop);
    {
      Node* index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
                &done_loop);

      Node* reg_index = IntPtrSub(reg_base, index);
      Node* value = LoadRegister(reg_index);

      // Parameter {index} goes into array slot {index}.
      StoreFixedArrayElement(array, index, value);

      var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  {
    // Iterate over register file and write values into array.
    // The mapping of register to array index must match that used in
    // BytecodeGraphBuilder::VisitResumeGenerator.
    Variable var_index(this, MachineType::PointerRepresentation());
    var_index.Bind(IntPtrConstant(0));

    Label loop(this, &var_index), done_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      Node* index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

      // Register {index} has operand Register(0).ToOperand() - index.
      Node* reg_index =
          IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
      Node* value = LoadRegister(reg_index);

      // Registers are stored after the parameter section of the array.
      Node* array_index = IntPtrAdd(formal_parameter_count_intptr, index);
      StoreFixedArrayElement(array, array_index, value);

      var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  return array;
}
1690 :
// Restores the register file from {array} (the generator's
// parameters_and_registers storage), the inverse of
// ExportParametersAndRegisterFile's register loop. Each consumed slot is
// overwritten with the stale-register sentinel. Returns {array}.
Node* InterpreterAssembler::ImportRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  TNode<IntPtrT> formal_parameter_count_intptr =
      ChangeInt32ToIntPtr(formal_parameter_count);
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    // The register list is expected to start at register 0; verify that and
    // that the array is large enough for parameters + registers.
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));

  // Iterate over array and write values into register file. Also erase the
  // array contents to not keep them alive artificially.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    TNode<IntPtrT> index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    // Register values live after the parameter section of the array.
    TNode<IntPtrT> array_index =
        IntPtrAdd(formal_parameter_count_intptr, index);
    TNode<Object> value = LoadFixedArrayElement(array, array_index);

    // Register {index} has operand Register(0).ToOperand() - index.
    TNode<IntPtrT> reg_index =
        IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

    // Replace the consumed slot so the value is not kept alive by the array.
    StoreFixedArrayElement(array, array_index,
                           LoadRoot(RootIndex::kStaleRegister));

    var_index = IntPtrAdd(index, IntPtrConstant(1));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}
1733 :
// Size in bytes of the current bytecode (including its operands) at this
// assembler's operand scale.
int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}
1737 :
// Converts the accumulator to a Number (or, for kToNumeric mode, a Numeric,
// which additionally admits BigInt), records the corresponding binary-op
// type feedback in the feedback slot named by operand 0, stores the result
// back into the accumulator and dispatches to the next bytecode.
void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
  Node* object = GetAccumulator();
  Node* context = GetContext();

  Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
  Variable var_result(this, MachineRepresentation::kTagged);
  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
      if_objectisother(this, Label::kDeferred);

  // Smis and HeapNumbers are already valid results; everything else needs a
  // builtin call (deferred, as it is the uncommon case).
  GotoIf(TaggedIsSmi(object), &if_objectissmi);
  Branch(IsHeapNumber(object), &if_objectisheapnumber, &if_objectisother);

  BIND(&if_objectissmi);
  {
    var_result.Bind(object);
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
    Goto(&if_done);
  }

  BIND(&if_objectisheapnumber);
  {
    var_result.Bind(object);
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
    Goto(&if_done);
  }

  BIND(&if_objectisother);
  {
    auto builtin = Builtins::kNonNumberToNumber;
    if (mode == Object::Conversion::kToNumeric) {
      builtin = Builtins::kNonNumberToNumeric;
      // Special case for collecting BigInt feedback: a BigInt is already a
      // valid Numeric, so no conversion call is needed.
      Label not_bigint(this);
      GotoIfNot(IsBigInt(object), &not_bigint);
      {
        var_result.Bind(object);
        var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
        Goto(&if_done);
      }
      BIND(&not_bigint);
    }

    // Convert {object} by calling out to the appropriate builtin.
    var_result.Bind(CallBuiltin(builtin, context, object));
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
    Goto(&if_done);
  }

  BIND(&if_done);

  // Record the type feedback collected for {object}.
  Node* slot_index = BytecodeOperandIdx(0);
  Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();

  UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);

  SetAccumulator(var_result.value());
  Dispatch();
}
1797 :
1798 : } // namespace interpreter
1799 : } // namespace internal
1800 94089 : } // namespace v8
|