Line data Source code
1 : // Copyright 2015 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
6 : #define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
7 :
8 : #include "src/allocation.h"
9 : #include "src/builtins/builtins.h"
10 : #include "src/code-stub-assembler.h"
11 : #include "src/globals.h"
12 : #include "src/interpreter/bytecode-register.h"
13 : #include "src/interpreter/bytecodes.h"
14 : #include "src/runtime/runtime.h"
15 :
16 : namespace v8 {
17 : namespace internal {
18 : namespace interpreter {
19 :
// InterpreterAssembler builds the code for a single interpreter bytecode
// handler. An instance is parameterized by the Bytecode it implements and
// that bytecode's OperandScale, and provides helpers for decoding operands,
// accessing the register file and accumulator, making JS/runtime calls,
// jumping, and dispatching to the next bytecode.
20 : class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
21 : public:
22 : InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
23 : OperandScale operand_scale);
24 : ~InterpreterAssembler();
25 :
26 : // Returns the 32-bit unsigned count immediate for bytecode operand
27 : // |operand_index| in the current bytecode.
28 : compiler::Node* BytecodeOperandCount(int operand_index);
29 : // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
30 : // in the current bytecode.
31 : compiler::Node* BytecodeOperandFlag(int operand_index);
32 : // Returns the 32-bit zero-extended index immediate for bytecode operand
33 : // |operand_index| in the current bytecode.
34 : compiler::Node* BytecodeOperandIdxInt32(int operand_index);
35 : // Returns the word zero-extended index immediate for bytecode operand
36 : // |operand_index| in the current bytecode.
37 : compiler::Node* BytecodeOperandIdx(int operand_index);
38 : // Returns the smi index immediate for bytecode operand |operand_index|
39 : // in the current bytecode.
40 : compiler::Node* BytecodeOperandIdxSmi(int operand_index);
41 : // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
42 : // in the current bytecode.
43 : compiler::Node* BytecodeOperandUImm(int operand_index);
44 : // Returns the word-size unsigned immediate for bytecode operand
45 : // |operand_index| in the current bytecode.
46 : compiler::Node* BytecodeOperandUImmWord(int operand_index);
47 : // Returns the unsigned smi immediate for bytecode operand |operand_index| in
48 : // the current bytecode.
49 : compiler::Node* BytecodeOperandUImmSmi(int operand_index);
50 : // Returns the 32-bit signed immediate for bytecode operand |operand_index|
51 : // in the current bytecode.
52 : compiler::Node* BytecodeOperandImm(int operand_index);
53 : // Returns the word-size signed immediate for bytecode operand |operand_index|
54 : // in the current bytecode.
55 : compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
56 : // Returns the smi immediate for bytecode operand |operand_index| in the
57 : // current bytecode.
58 : compiler::Node* BytecodeOperandImmSmi(int operand_index);
59 : // Returns the word-size sign-extended register index for bytecode operand
60 : // |operand_index| in the current bytecode.
61 : compiler::Node* BytecodeOperandReg(int operand_index);
62 : // Returns the 32-bit unsigned runtime id immediate for bytecode operand
63 : // |operand_index| in the current bytecode.
64 : compiler::Node* BytecodeOperandRuntimeId(int operand_index);
65 : // Returns the 32-bit unsigned native context index immediate for bytecode
66 : // operand |operand_index| in the current bytecode.
67 : compiler::Node* BytecodeOperandNativeContextIndex(int operand_index);
68 : // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
69 : // |operand_index| in the current bytecode.
70 : compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
71 :
72 : // Accumulator.
73 : compiler::Node* GetAccumulator();
74 : void SetAccumulator(compiler::Node* value);
75 :
76 : // Context.
77 : compiler::Node* GetContext();
78 : void SetContext(compiler::Node* value);
79 :
80 : // Context at |depth| in the context chain starting at |context|.
81 : compiler::Node* GetContextAtDepth(compiler::Node* context,
82 : compiler::Node* depth);
83 :
84 : // Goto the given |target| if the context chain starting at |context| has any
85 : // extensions up to the given |depth|.
86 : void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
87 : compiler::Node* depth, Label* target);
88 :
89 : // Backup/restore register file to/from a fixed array of the correct length.
90 : compiler::Node* ExportRegisterFile(compiler::Node* array,
91 : compiler::Node* register_count);
92 : compiler::Node* ImportRegisterFile(compiler::Node* array,
93 : compiler::Node* register_count);
94 :
95 : // Loads from and stores to the interpreter register file.
96 : compiler::Node* LoadRegister(Register reg);
97 : compiler::Node* LoadRegister(compiler::Node* reg_index);
98 : compiler::Node* LoadAndUntagRegister(Register reg);
99 : compiler::Node* StoreRegister(compiler::Node* value, Register reg);
100 : compiler::Node* StoreRegister(compiler::Node* value,
101 : compiler::Node* reg_index);
102 : compiler::Node* StoreAndTagRegister(compiler::Node* value, Register reg);
103 :
104 : // Returns the next consecutive register.
105 : compiler::Node* NextRegister(compiler::Node* reg_index);
106 :
107 : // Returns the location in memory of the register |reg_index| in the
108 : // interpreter register file.
109 : compiler::Node* RegisterLocation(compiler::Node* reg_index);
110 :
111 : // Load constant at |index| in the constant pool.
112 : compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
113 :
114 : // Load and untag constant at |index| in the constant pool.
115 : compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
116 :
117 : // Load the FeedbackVector for the current function.
118 : compiler::Node* LoadFeedbackVector();
119 :
120 : // Increment the call count for a CALL_IC or construct call.
121 : // The call count is located at feedback_vector[slot_id + 1].
122 : compiler::Node* IncrementCallCount(compiler::Node* feedback_vector,
123 : compiler::Node* slot_id);
124 :
125 : // Collect CALL_IC feedback for |target| function in the
126 : // |feedback_vector| at |slot_id|.
127 : void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
128 : compiler::Node* slot_id,
129 : compiler::Node* feedback_vector);
130 :
131 : // Call JSFunction or Callable |function| with |arg_count| arguments (not
132 : // including receiver) and the first argument located at |first_arg|, possibly
133 : // including the receiver depending on |receiver_mode|. After the call returns
134 : // directly dispatches to the next bytecode.
135 : void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
136 : compiler::Node* first_arg, compiler::Node* arg_count,
137 : ConvertReceiverMode receiver_mode);
138 :
139 : // Call JSFunction or Callable |function| with |arg_count| arguments (not
140 : // including receiver) passed as |args|, possibly including the receiver
141 : // depending on |receiver_mode|. After the call returns directly dispatches to
142 : // the next bytecode.
143 : template <class... TArgs>
144 : void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
145 : ConvertReceiverMode receiver_mode, TArgs... args);
146 :
147 : // Call JSFunction or Callable |function| with |arg_count|
148 : // arguments (not including receiver) and the first argument
149 : // located at |first_arg|, and the final argument being spread. After the call
150 : // returns directly dispatches to the next bytecode.
151 : void CallJSWithSpreadAndDispatch(compiler::Node* function,
152 : compiler::Node* context,
153 : compiler::Node* first_arg,
154 : compiler::Node* arg_count,
155 : compiler::Node* slot_id,
156 : compiler::Node* feedback_vector);
157 :
158 : // Call constructor |target| with |arg_count| arguments (not
159 : // including receiver) and the first argument located at
160 : // |first_arg|. The |new_target| is the same as the
161 : // |target| for the new keyword, but differs for the super
162 : // keyword.
163 : compiler::Node* Construct(compiler::Node* target, compiler::Node* context,
164 : compiler::Node* new_target,
165 : compiler::Node* first_arg,
166 : compiler::Node* arg_count, compiler::Node* slot_id,
167 : compiler::Node* feedback_vector);
168 :
169 : // Call constructor |target| with |arg_count| arguments (not including
170 : // receiver) and the first argument located at |first_arg|. The last argument
171 : // is always a spread. The |new_target| is the same as the |target| for
172 : // the new keyword, but differs for the super keyword.
173 : compiler::Node* ConstructWithSpread(compiler::Node* target,
174 : compiler::Node* context,
175 : compiler::Node* new_target,
176 : compiler::Node* first_arg,
177 : compiler::Node* arg_count,
178 : compiler::Node* slot_id,
179 : compiler::Node* feedback_vector);
180 :
181 : // Call runtime function with |arg_count| arguments and the first argument
182 : // located at |first_arg|.
183 : compiler::Node* CallRuntimeN(compiler::Node* function_id,
184 : compiler::Node* context,
185 : compiler::Node* first_arg,
186 : compiler::Node* arg_count, int return_size = 1);
187 :
188 : // Jump forward relative to the current bytecode by the |jump_offset|.
189 : compiler::Node* Jump(compiler::Node* jump_offset);
190 :
191 : // Jump backward relative to the current bytecode by the |jump_offset|.
192 : compiler::Node* JumpBackward(compiler::Node* jump_offset);
193 :
194 : // Jump forward relative to the current bytecode by |jump_offset| if the
195 : // word values |lhs| and |rhs| are equal.
196 : void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
197 : compiler::Node* jump_offset);
198 :
199 : // Jump forward relative to the current bytecode by |jump_offset| if the
200 : // word values |lhs| and |rhs| are not equal.
201 : void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
202 : compiler::Node* jump_offset);
203 :
204 : // Returns true if the stack guard check triggers an interrupt.
205 : compiler::Node* StackCheckTriggeredInterrupt();
206 :
207 : // Updates the profiler interrupt budget for a return.
208 : void UpdateInterruptBudgetOnReturn();
209 :
210 : // Returns the OSR nesting level from the bytecode header.
211 : compiler::Node* LoadOSRNestingLevel();
212 :
213 : // Dispatch to the bytecode.
214 : compiler::Node* Dispatch();
215 :
216 : // Dispatch to bytecode handler. Convenience overload that dispatches at the
217 : // current BytecodeOffset().
217 651 : compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
218 651 : return DispatchToBytecodeHandler(handler, BytecodeOffset());
219 : }
220 :
221 : // Dispatch bytecode as wide operand variant.
222 : void DispatchWide(OperandScale operand_scale);
223 :
224 : // Abort with the given bailout reason.
225 : void Abort(BailoutReason bailout_reason);
226 : void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
227 : BailoutReason bailout_reason);
228 : // Abort if |register_count| is invalid for given register file array.
229 : void AbortIfRegisterCountInvalid(compiler::Node* register_file,
230 : compiler::Node* register_count);
231 :
232 : // Dispatch to frame dropper trampoline if necessary.
233 : void MaybeDropFrames(compiler::Node* context);
234 :
235 : // Returns the offset from the BytecodeArrayPointer of the current bytecode.
236 : compiler::Node* BytecodeOffset();
237 :
238 : protected:
239 : Bytecode bytecode() const { return bytecode_; }
240 : static bool TargetSupportsUnalignedAccess();
241 :
242 : void ToNumberOrNumeric(Object::Conversion mode);
243 :
244 : private:
245 : // Returns a tagged pointer to the current function's BytecodeArray object.
246 : compiler::Node* BytecodeArrayTaggedPointer();
247 :
248 : // Returns a raw pointer to first entry in the interpreter dispatch table.
249 : compiler::Node* DispatchTableRawPointer();
250 :
251 : // Returns the accumulator value without checking whether bytecode
252 : // uses it. This is intended to be used only in dispatch and in
253 : // tracing as these need to bypass accumulator use validity checks.
254 : compiler::Node* GetAccumulatorUnchecked();
255 :
256 : // Returns the frame pointer for the interpreted frame of the function being
257 : // interpreted.
258 : compiler::Node* GetInterpretedFramePointer();
259 :
260 : // Saves and restores interpreter bytecode offset to the interpreter stack
261 : // frame when performing a call.
262 : void CallPrologue();
263 : void CallEpilogue();
264 :
265 : // Increment the dispatch counter for the (current, next) bytecode pair.
266 : void TraceBytecodeDispatch(compiler::Node* target_index);
267 :
268 : // Traces the current bytecode by calling |function_id|.
269 : void TraceBytecode(Runtime::FunctionId function_id);
270 :
271 : // Updates the bytecode array's interrupt budget by a 32-bit unsigned |weight|
272 : // and calls Runtime::kInterrupt if counter reaches zero. If |backward|, then
273 : // the interrupt budget is decremented, otherwise it is incremented.
274 : void UpdateInterruptBudget(compiler::Node* weight, bool backward);
275 :
276 : // Returns the offset of register |index| relative to RegisterFilePointer().
277 : compiler::Node* RegisterFrameOffset(compiler::Node* index);
278 :
279 : // Returns the offset of an operand relative to the current bytecode offset.
280 : compiler::Node* OperandOffset(int operand_index);
281 :
282 : // Returns a value built from a sequence of bytes in the bytecode
283 : // array starting at |relative_offset| from the current bytecode.
284 : // The |result_type| determines the size and signedness of the
285 : // value read. This method should only be used on architectures that
286 : // do not support unaligned memory accesses.
287 : compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
288 : MachineType result_type);
289 :
290 : // Returns zero- or sign-extended to word32 value of the operand.
291 : compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
292 : compiler::Node* BytecodeOperandSignedByte(int operand_index);
293 : compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
294 : compiler::Node* BytecodeOperandSignedShort(int operand_index);
295 : compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
296 : compiler::Node* BytecodeOperandSignedQuad(int operand_index);
297 :
298 : // Returns zero- or sign-extended to word32 value of the operand of
299 : // given size.
300 : compiler::Node* BytecodeSignedOperand(int operand_index,
301 : OperandSize operand_size);
302 : compiler::Node* BytecodeUnsignedOperand(int operand_index,
303 : OperandSize operand_size);
304 :
305 : // Jump relative to the current bytecode by the |jump_offset|. If |backward|,
306 : // then jump backward (subtract the offset), otherwise jump forward (add the
307 : // offset). Helper function for Jump and JumpBackward.
308 : compiler::Node* Jump(compiler::Node* jump_offset, bool backward);
309 :
310 : // Jump forward relative to the current bytecode by |jump_offset| if the
311 : // |condition| is true. Helper function for JumpIfWordEqual and
312 : // JumpIfWordNotEqual.
313 : void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
314 :
315 : // Save the bytecode offset to the interpreter frame.
316 : void SaveBytecodeOffset();
317 : // Reload the bytecode offset from the interpreter frame.
318 : Node* ReloadBytecodeOffset();
319 :
320 : // Updates and returns BytecodeOffset() advanced by the current bytecode's
321 : // size. Traces the exit of the current bytecode.
322 : compiler::Node* Advance();
323 :
324 : // Updates and returns BytecodeOffset() advanced by delta bytecodes.
325 : // Traces the exit of the current bytecode.
326 : compiler::Node* Advance(int delta);
327 : compiler::Node* Advance(compiler::Node* delta, bool backward = false);
328 :
329 : // Load the bytecode at |bytecode_offset|.
330 : compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);
331 :
332 : // Look ahead for Star and inline it in a branch. Returns a new target
333 : // bytecode node for dispatch.
334 : compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);
335 :
336 : // Build code for Star at the current BytecodeOffset() and Advance() to the
337 : // next dispatch offset.
338 : void InlineStar();
339 :
340 : // Dispatch to |target_bytecode| at |new_bytecode_offset|.
341 : // |target_bytecode| should be equivalent to loading from the offset.
342 : compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
343 : compiler::Node* new_bytecode_offset);
344 :
345 : // Dispatch to the bytecode handler with code offset |handler|.
346 : compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
347 : compiler::Node* bytecode_offset);
348 :
349 : // Dispatch to the bytecode handler with code entry point |handler_entry|.
350 : compiler::Node* DispatchToBytecodeHandlerEntry(
351 : compiler::Node* handler_entry, compiler::Node* bytecode_offset);
352 :
353 : int CurrentBytecodeSize() const;
354 :
355 : OperandScale operand_scale() const { return operand_scale_; }
356 :
// The bytecode and operand scale this assembler generates a handler for.
357 : Bytecode bytecode_;
358 : OperandScale operand_scale_;
// CodeStubAssembler variables holding interpreter state threaded through
// the generated code (frame pointer, bytecode array/offset, dispatch table
// and accumulator).
359 : CodeStubAssembler::Variable interpreted_frame_pointer_;
360 : CodeStubAssembler::Variable bytecode_array_;
361 : CodeStubAssembler::Variable bytecode_offset_;
362 : CodeStubAssembler::Variable dispatch_table_;
363 : CodeStubAssembler::Variable accumulator_;
// Bookkeeping updated during code generation. NOTE(review): exact
// maintenance of these flags lives in interpreter-assembler.cc — confirm
// semantics there before relying on them.
364 : AccumulatorUse accumulator_use_;
365 : bool made_call_;
366 : bool reloaded_frame_ptr_;
367 : bool bytecode_array_valid_;
368 :
369 : bool disable_stack_check_across_call_;
370 : compiler::Node* stack_pointer_before_call_;
371 :
372 : DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
373 : };
374 :
375 : } // namespace interpreter
376 : } // namespace internal
377 : } // namespace v8
378 :
379 : #endif // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
|