Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_OBJECTS_CODE_H_
6 : #define V8_OBJECTS_CODE_H_
7 :
8 : #include "src/contexts.h"
9 : #include "src/handler-table.h"
10 : #include "src/objects.h"
11 : #include "src/objects/fixed-array.h"
12 : #include "src/objects/heap-object.h"
13 : #include "src/objects/struct.h"
14 :
15 : // Has to be the last include (doesn't have include guards):
16 : #include "src/objects/object-macros.h"
17 :
18 : namespace v8 {
19 : namespace internal {
20 :
21 : class ByteArray;
22 : class BytecodeArray;
23 : class CodeDataContainer;
24 : class MaybeObject;
25 :
26 : namespace interpreter {
27 : class Register;
28 : }
29 :
// Code describes objects with on-the-fly generated machine code.
class Code : public HeapObject {
 public:
  NEVER_READ_ONLY_SPACE
  // Opaque data type for encapsulating code flags like kind, inline
  // cache state, and arguments count.
  typedef uint32_t Flags;

#define CODE_KIND_LIST(V)   \
  V(OPTIMIZED_FUNCTION)     \
  V(BYTECODE_HANDLER)       \
  V(STUB)                   \
  V(BUILTIN)                \
  V(REGEXP)                 \
  V(WASM_FUNCTION)          \
  V(WASM_TO_JS_FUNCTION)    \
  V(JS_TO_WASM_FUNCTION)    \
  V(WASM_INTERPRETER_ENTRY) \
  V(C_WASM_ENTRY)

  enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
    CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
        NUMBER_OF_KINDS
  };

  // Returns a human-readable name for the given code kind.
  static const char* Kind2String(Kind kind);

#ifdef ENABLE_DISASSEMBLER
  const char* GetName(Isolate* isolate) const;
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress);
#endif

  // [instruction_size]: Size of the native instructions, including embedded
  // data such as the safepoints table.
  inline int raw_instruction_size() const;
  inline void set_raw_instruction_size(int value);

  // Returns the size of the native instructions, including embedded
  // data such as the safepoints table. For off-heap code objects
  // this may differ from instruction_size in that this will return the size
  // of the off-heap instruction stream rather than the on-heap trampoline
  // located at instruction_start.
  inline int InstructionSize() const;
  int OffHeapInstructionSize() const;

  // [relocation_info]: Code relocation information
  DECL_ACCESSORS(relocation_info, ByteArray)

  // This function should be called only from GC.
  void ClearEmbeddedObjects(Heap* heap);

  // [deoptimization_data]: Array containing data for deopt.
  DECL_ACCESSORS(deoptimization_data, FixedArray)

  // [source_position_table]: ByteArray for the source positions table or
  // SourcePositionTableWithFrameCache.
  DECL_ACCESSORS(source_position_table, Object)
  inline ByteArray SourcePositionTable() const;

  // [code_data_container]: A container indirection for all mutable fields.
  DECL_ACCESSORS(code_data_container, CodeDataContainer)

  // [next_code_link]: Link for lists of optimized or deoptimized code.
  // Note that this field is stored in the {CodeDataContainer} to be mutable.
  inline Object next_code_link() const;
  inline void set_next_code_link(Object value);

  // [constant_pool offset]: Offset of the constant pool.
  // Valid for FLAG_enable_embedded_constant_pool only
  inline int constant_pool_offset() const;
  inline void set_constant_pool_offset(int offset);
  inline int constant_pool_size() const;

  // [code_comments_offset]: Offset of the code comment section.
  inline int code_comments_offset() const;
  inline void set_code_comments_offset(int offset);
  inline Address code_comments() const;

  // Unchecked accessors to be used during GC.
  inline ByteArray unchecked_relocation_info() const;

  inline int relocation_size() const;

  // [kind]: Access to specific code kind.
  inline Kind kind() const;

  inline bool is_optimized_code() const;
  inline bool is_wasm_code() const;

  // Testers for interpreter builtins.
  inline bool is_interpreter_trampoline_builtin() const;

  // Tells whether the code checks the optimization marker in the function's
  // feedback vector.
  inline bool checks_optimization_marker() const;

  // Tells whether the outgoing parameters of this code are tagged pointers.
  inline bool has_tagged_params() const;

  // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
  // code object was generated by the TurboFan optimizing compiler.
  inline bool is_turbofanned() const;

  // [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the
  // embedded objects in code should be treated weakly.
  inline bool can_have_weak_objects() const;
  inline void set_can_have_weak_objects(bool value);

  // [builtin_index]: For builtins, tells which builtin index the code object
  // has. The builtin index is a non-negative integer for builtins, and -1
  // otherwise.
  inline int builtin_index() const;
  inline void set_builtin_index(int id);
  inline bool is_builtin() const;

  inline bool has_safepoint_info() const;

  // [stack_slots]: If {has_safepoint_info()}, the number of stack slots
  // reserved in the code prologue.
  inline int stack_slots() const;

  // [safepoint_table_offset]: If {has_safepoint_info()}, the offset in the
  // instruction stream where the safepoint table starts.
  inline int safepoint_table_offset() const;
  inline void set_safepoint_table_offset(int offset);

  // [handler_table_offset]: The offset in the instruction stream where the
  // exception handler table starts.
  inline int handler_table_offset() const;
  inline void set_handler_table_offset(int offset);

  // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
  // the code is going to be deoptimized.
  inline bool marked_for_deoptimization() const;
  inline void set_marked_for_deoptimization(bool flag);

  // [embedded_objects_cleared]: For kind OPTIMIZED_FUNCTION tells whether
  // the embedded objects in the code marked for deoptimization were cleared.
  // Note that embedded_objects_cleared() implies marked_for_deoptimization().
  inline bool embedded_objects_cleared() const;
  inline void set_embedded_objects_cleared(bool flag);

  // [deopt_already_counted]: For kind OPTIMIZED_FUNCTION tells whether
  // the code was already deoptimized.
  inline bool deopt_already_counted() const;
  inline void set_deopt_already_counted(bool flag);

  // [is_promise_rejection]: For kind BUILTIN tells whether the
  // exception thrown by the code will lead to promise rejection or
  // uncaught if both this and is_exception_caught is set.
  // Use GetBuiltinCatchPrediction to access this.
  inline void set_is_promise_rejection(bool flag);

  // [is_exception_caught]: For kind BUILTIN tells whether the
  // exception thrown by the code will be caught internally or
  // uncaught if both this and is_promise_rejection is set.
  // Use GetBuiltinCatchPrediction to access this.
  inline void set_is_exception_caught(bool flag);

  // [is_off_heap_trampoline]: For kind BUILTIN tells whether
  // this is a trampoline to an off-heap builtin.
  inline bool is_off_heap_trampoline() const;

  // [constant_pool]: The constant pool for this function.
  inline Address constant_pool() const;

  // Get the safepoint entry for the given pc.
  SafepointEntry GetSafepointEntry(Address pc);

  // The entire code object including its header is copied verbatim to the
  // snapshot so that it can be written in one, fast, memcpy during
  // deserialization. The deserializer will overwrite some pointers, rather
  // like a runtime linker, but the random allocation addresses used in the
  // mksnapshot process would still be present in the unlinked snapshot data,
  // which would make snapshot production non-reproducible. This method wipes
  // out the to-be-overwritten header data for reproducible snapshots.
  inline void WipeOutHeader();

  // Clear uninitialized padding space. This ensures that the snapshot content
  // is deterministic.
  inline void clear_padding();
  // Initialize the flags field. Similar to clear_padding above this ensures
  // that the snapshot content is deterministic.
  inline void initialize_flags(Kind kind, bool has_unwinding_info,
                               bool is_turbofanned, int stack_slots,
                               bool is_off_heap_trampoline);

  // Convert a target address into a code object.
  static inline Code GetCodeFromTargetAddress(Address address);

  // Convert an entry address into an object.
  static inline Code GetObjectFromEntryAddress(Address location_of_address);

  // Returns the address of the first instruction.
  inline Address raw_instruction_start() const;

  // Returns the address of the first instruction. For off-heap code objects
  // this differs from instruction_start (which would point to the off-heap
  // trampoline instead).
  inline Address InstructionStart() const;
  Address OffHeapInstructionStart() const;

  // Returns the address right after the last instruction.
  inline Address raw_instruction_end() const;

  // Returns the address right after the last instruction. For off-heap code
  // objects this differs from instruction_end (which would point to the
  // off-heap trampoline instead).
  inline Address InstructionEnd() const;
  Address OffHeapInstructionEnd() const;

  // Returns the size of the instructions, padding, relocation and unwinding
  // information.
  inline int body_size() const;

  // Returns the size of code and its metadata. This includes the size of code
  // relocation information, deoptimization data and handler table.
  inline int SizeIncludingMetadata() const;

  // Returns the address of the first relocation info (read backwards!).
  inline byte* relocation_start() const;

  // Returns the address right after the relocation info (read backwards!).
  inline byte* relocation_end() const;

  // [has_unwinding_info]: Whether this code object has unwinding information.
  // If it doesn't, unwinding_information_start() will point to invalid data.
  //
  // The body of all code objects has the following layout.
  //
  //  +--------------------------+  <-- raw_instruction_start()
  //  |       instructions       |
  //  |           ...            |
  //  +--------------------------+
  //  |      relocation info     |
  //  |           ...            |
  //  +--------------------------+  <-- raw_instruction_end()
  //
  // If has_unwinding_info() is false, raw_instruction_end() points to the
  // first memory location after the end of the code object. Otherwise, the
  // body continues as follows:
  //
  //  +--------------------------+
  //  |    padding to the next   |
  //  |  8-byte aligned address  |
  //  +--------------------------+  <-- raw_instruction_end()
  //  |   [unwinding_info_size]  |
  //  |        as uint64_t       |
  //  +--------------------------+  <-- unwinding_info_start()
  //  |       unwinding info     |
  //  |           ...            |
  //  +--------------------------+  <-- unwinding_info_end()
  //
  // and unwinding_info_end() points to the first memory location after the
  // end of the code object.
  //
  inline bool has_unwinding_info() const;

  // [unwinding_info_size]: Size of the unwinding information.
  inline int unwinding_info_size() const;
  inline void set_unwinding_info_size(int value);

  // Returns the address of the unwinding information, if any.
  inline Address unwinding_info_start() const;

  // Returns the address right after the end of the unwinding information.
  inline Address unwinding_info_end() const;

  // Code entry point.
  inline Address entry() const;

  // Returns true if pc is inside this object's instructions.
  inline bool contains(Address pc);

  // Relocate the code by delta bytes. Called to signal that this code
  // object has been moved by delta bytes.
  void Relocate(intptr_t delta);

  // Migrate code from desc without flushing the instruction cache.
  void CopyFromNoFlush(Heap* heap, const CodeDesc& desc);

  // Copy the RelocInfo portion of |desc| to |dest|. The ByteArray must be
  // exactly the same size as the RelocInfo in |desc|.
  static inline void CopyRelocInfoToByteArray(ByteArray dest,
                                              const CodeDesc& desc);

  // Flushes the instruction cache for the executable instructions of this
  // code object. Make sure to call this while the code is still writable.
  void FlushICache() const;

  // Returns the object size for a given body (used for allocation).
  static int SizeFor(int body_size) {
    DCHECK_SIZE_TAG_ALIGNED(body_size);
    return RoundUp(kHeaderSize + body_size, kCodeAlignment);
  }

  // Calculate the size of the code object to report for log events. This
  // takes the layout of the code object into account.
  inline int ExecutableSize() const;

  DECL_CAST(Code)

  // Dispatched behavior.
  inline int CodeSize() const;

  DECL_PRINTER(Code)
  DECL_VERIFIER(Code)

  void PrintDeoptLocation(FILE* out, const char* str, Address pc);
  bool CanDeoptAt(Address pc);

  void SetMarkedForDeoptimization(const char* reason);

  inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();

  bool IsIsolateIndependent(Isolate* isolate);

  inline bool CanContainWeakObjects();

  inline bool IsWeakObject(HeapObject object);

  static inline bool IsWeakObjectInOptimizedCode(HeapObject object);

  // Return true if the function is inlined in the code.
  bool Inlines(SharedFunctionInfo sfi);

  class OptimizedCodeIterator;

  // Layout description.
#define CODE_FIELDS(V)                                                    \
  V(kRelocationInfoOffset, kTaggedSize)                                   \
  V(kDeoptimizationDataOffset, kTaggedSize)                               \
  V(kSourcePositionTableOffset, kTaggedSize)                              \
  V(kCodeDataContainerOffset, kTaggedSize)                                \
  /* Data or code not directly visited by GC directly starts here. */    \
  /* The serializer needs to copy bytes starting from here verbatim. */  \
  /* Objects embedded into code is visited via reloc info. */            \
  V(kDataStart, 0)                                                        \
  V(kInstructionSizeOffset, kIntSize)                                     \
  V(kFlagsOffset, kIntSize)                                               \
  V(kSafepointTableOffsetOffset, kIntSize)                                \
  V(kHandlerTableOffsetOffset, kIntSize)                                  \
  V(kConstantPoolOffset, FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
  V(kBuiltinIndexOffset, kIntSize)                                        \
  V(kCodeCommentsOffset, kIntSize)                                        \
  /* Add padding to align the instruction start following right after */ \
  /* the Code object header. */                                          \
  V(kHeaderPaddingStart, CODE_POINTER_PADDING(kHeaderPaddingStart))       \
  V(kHeaderSize, 0)

  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_FIELDS)
#undef CODE_FIELDS

  // This documents the amount of free space we have in each Code object
  // header due to padding for code alignment.
#if V8_TARGET_ARCH_ARM64
  static constexpr int kHeaderPaddingSize = 0;
  STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_MIPS64
  static constexpr int kHeaderPaddingSize = 0;
  STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_X64
  static constexpr int kHeaderPaddingSize = 0;
  STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_ARM
  static constexpr int kHeaderPaddingSize = 20;
  STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_IA32
  static constexpr int kHeaderPaddingSize = 20;
  STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_MIPS
  static constexpr int kHeaderPaddingSize = 20;
  STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_PPC64
  // No static assert possible since padding size depends on the
  // FLAG_enable_embedded_constant_pool runtime flag.
#elif V8_TARGET_ARCH_S390X
  static constexpr int kHeaderPaddingSize = 0;
  STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#else
#error Unknown architecture.
#endif

  inline int GetUnwindingInfoSizeOffset() const;

  class BodyDescriptor;

  // Flags layout.  BitField<type, shift, size>.
#define CODE_FLAGS_BIT_FIELDS(V, _)    \
  V(HasUnwindingInfoField, bool, 1, _) \
  V(KindField, Kind, 5, _)             \
  V(IsTurbofannedField, bool, 1, _)    \
  V(StackSlotsField, int, 24, _)       \
  V(IsOffHeapTrampoline, bool, 1, _)
  DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
#undef CODE_FLAGS_BIT_FIELDS
  static_assert(NUMBER_OF_KINDS <= KindField::kMax, "Code::KindField size");
  static_assert(IsOffHeapTrampoline::kNext <= 32,
                "Code::flags field exhausted");

  // KindSpecificFlags layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
#define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
  V(MarkedForDeoptimizationField, bool, 1, _)     \
  V(EmbeddedObjectsClearedField, bool, 1, _)      \
  V(DeoptAlreadyCountedField, bool, 1, _)         \
  V(CanHaveWeakObjectsField, bool, 1, _)          \
  V(IsPromiseRejectionField, bool, 1, _)          \
  V(IsExceptionCaughtField, bool, 1, _)
  DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
#undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
  static_assert(IsExceptionCaughtField::kNext <= 32, "KindSpecificFlags full");

  // The {marked_for_deoptimization} field is accessed from generated code.
  static const int kMarkedForDeoptimizationBit =
      MarkedForDeoptimizationField::kShift;

  static const int kArgumentsBits = 16;
  // Reserve one argument count value as the "don't adapt arguments" sentinel.
  static const int kMaxArguments = (1 << kArgumentsBits) - 2;

 private:
  friend class RelocIterator;

  bool is_promise_rejection() const;
  bool is_exception_caught() const;

  OBJECT_CONSTRUCTORS(Code, HeapObject);
};
461 :
// Iterates over all optimized Code objects, walking the optimized-code lists
// of each native context on the isolate.
class Code::OptimizedCodeIterator {
 public:
  explicit OptimizedCodeIterator(Isolate* isolate);
  // Returns the next optimized Code object, or an empty value when the
  // iteration is exhausted.
  Code Next();

 private:
  Context next_context_;  // Next native context whose code list to walk.
  Code current_code_;     // Current position within a context's code list.
  Isolate* isolate_;

  // Iteration holds raw references into code lists; GC must not move them.
  DISALLOW_HEAP_ALLOCATION(no_gc);
  DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator);
};
475 :
// CodeDataContainer is a container for all mutable fields associated with its
// referencing {Code} object. Since {Code} objects reside on write-protected
// pages within the heap, its header fields need to be immutable. There always
// is a 1-to-1 relation between {Code} and {CodeDataContainer}, the referencing
// field {Code::code_data_container} itself is immutable.
class CodeDataContainer : public HeapObject {
 public:
  NEVER_READ_ONLY_SPACE
  DECL_ACCESSORS(next_code_link, Object)
  DECL_INT_ACCESSORS(kind_specific_flags)

  // Clear uninitialized padding space. This ensures that the snapshot content
  // is deterministic.
  inline void clear_padding();

  DECL_CAST(CodeDataContainer)

  // Dispatched behavior.
  DECL_PRINTER(CodeDataContainer)
  DECL_VERIFIER(CodeDataContainer)

  // Layout description.
#define CODE_DATA_FIELDS(V)                                 \
  /* Weak pointer fields. */                                \
  V(kPointerFieldsStrongEndOffset, 0)                       \
  V(kNextCodeLinkOffset, kTaggedSize)                       \
  V(kPointerFieldsWeakEndOffset, 0)                         \
  /* Raw data fields. */                                    \
  V(kKindSpecificFlagsOffset, kIntSize)                     \
  V(kUnalignedSize, OBJECT_POINTER_PADDING(kUnalignedSize)) \
  /* Total size. */                                         \
  V(kSize, 0)

  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_DATA_FIELDS)
#undef CODE_DATA_FIELDS

  class BodyDescriptor;

  OBJECT_CONSTRUCTORS(CodeDataContainer, HeapObject);
};
516 :
// AbstractCode is a helper wrapper around either a Code object or a
// BytecodeArray, exposing a common interface over both.
class AbstractCode : public HeapObject {
 public:
  NEVER_READ_ONLY_SPACE
  // All code kinds and INTERPRETED_FUNCTION.
  enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
    CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
        INTERPRETED_FUNCTION,
    NUMBER_OF_KINDS
  };

  // Returns a human-readable name for the given code kind.
  static const char* Kind2String(Kind kind);

  int SourcePosition(int offset);
  int SourceStatementPosition(int offset);

  // Returns the address of the first instruction.
  inline Address raw_instruction_start();

  // Returns the address of the first instruction. For off-heap code objects
  // this differs from instruction_start (which would point to the off-heap
  // trampoline instead).
  inline Address InstructionStart();

  // Returns the address right after the last instruction.
  inline Address raw_instruction_end();

  // Returns the address right after the last instruction. For off-heap code
  // objects this differs from instruction_end (which would point to the
  // off-heap trampoline instead).
  inline Address InstructionEnd();

  // Returns the size of the code instructions.
  inline int raw_instruction_size();

  // Returns the size of the native instructions, including embedded
  // data such as the safepoints table. For off-heap code objects
  // this may differ from instruction_size in that this will return the size
  // of the off-heap instruction stream rather than the on-heap trampoline
  // located at instruction_start.
  inline int InstructionSize();

  // Return the source position table.
  inline ByteArray source_position_table();

  inline Object stack_frame_cache();
  static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
                                 Handle<SimpleNumberDictionary> cache);
  void DropStackFrameCache();

  // Returns the size of instructions and the metadata.
  inline int SizeIncludingMetadata();

  // Returns true if pc is inside this object's instructions.
  inline bool contains(Address pc);

  // Returns the AbstractCode::Kind of the code.
  inline Kind kind();

  // Calculate the size of the code object to report for log events. This
  // takes the layout of the code object into account.
  inline int ExecutableSize();

  DECL_CAST(AbstractCode)
  inline Code GetCode();
  inline BytecodeArray GetBytecodeArray();

  // Max loop nesting marker used to postpone OSR. We don't take loop
  // nesting that is deeper than 5 levels into account.
  static const int kMaxLoopNestingMarker = 6;

  OBJECT_CONSTRUCTORS(AbstractCode, HeapObject)
};
591 :
// Dependent code is a singly linked list of weak fixed arrays. Each array
// contains weak pointers to code objects for one dependent group. The suffix
// of the array can be filled with the undefined value if the number of codes
// is less than the length of the array.
//
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
// | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
//    |
//    V
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
// | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... |
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
//    |
//    V
// empty_weak_fixed_array()
//
// The list of weak fixed arrays is ordered by dependency groups.

class DependentCode : public WeakFixedArray {
 public:
  DECL_CAST(DependentCode)

  enum DependencyGroup {
    // Group of code that embed a transition to this map, and depend on being
    // deoptimized when the transition is replaced by a new version.
    kTransitionGroup,
    // Group of code that omit run-time prototype checks for prototypes
    // described by this map. The group is deoptimized whenever an object
    // described by this map changes shape (and transitions to a new map),
    // possibly invalidating the assumptions embedded in the code.
    kPrototypeCheckGroup,
    // Group of code that depends on global property values in property cells
    // not being changed.
    kPropertyCellChangedGroup,
    // Group of code that omit run-time checks for field(s) introduced by
    // this map, i.e. for the field type.
    kFieldOwnerGroup,
    // Group of code that omit run-time type checks for initial maps of
    // constructors.
    kInitialMapChangedGroup,
    // Group of code that depends on tenuring information in AllocationSites
    // not being changed.
    kAllocationSiteTenuringChangedGroup,
    // Group of code that depends on element transition information in
    // AllocationSites not being changed.
    kAllocationSiteTransitionChangedGroup
  };

  // Register a code dependency of {cell} on {object}.
  static void InstallDependency(Isolate* isolate, const MaybeObjectHandle& code,
                                Handle<HeapObject> object,
                                DependencyGroup group);

  void DeoptimizeDependentCodeGroup(Isolate* isolate, DependencyGroup group);

  bool MarkCodeForDeoptimization(Isolate* isolate, DependencyGroup group);

  // The following low-level accessors are exposed only for tests.
  inline DependencyGroup group();
  inline MaybeObject object_at(int i);
  inline int count();
  inline DependentCode next_link();

 private:
  static const char* DependencyGroupName(DependencyGroup group);

  // Get/Set {object}'s {DependentCode}.
  static DependentCode GetDependentCode(Handle<HeapObject> object);
  static void SetDependentCode(Handle<HeapObject> object,
                               Handle<DependentCode> dep);

  static Handle<DependentCode> New(Isolate* isolate, DependencyGroup group,
                                   const MaybeObjectHandle& object,
                                   Handle<DependentCode> next);
  static Handle<DependentCode> EnsureSpace(Isolate* isolate,
                                           Handle<DependentCode> entries);
  static Handle<DependentCode> InsertWeakCode(Isolate* isolate,
                                              Handle<DependentCode> entries,
                                              DependencyGroup group,
                                              const MaybeObjectHandle& code);

  // Compact by removing cleared weak cells and return true if there was
  // any cleared weak cell.
  bool Compact();

  // Growth policy: grow one entry at a time while small, then by 25% to
  // amortize reallocation cost for larger arrays.
  static int Grow(int number_of_entries) {
    if (number_of_entries < 5) return number_of_entries + 1;
    return number_of_entries * 5 / 4;
  }

  static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
  static const int kNextLinkIndex = 0;
  static const int kFlagsIndex = 1;
  static const int kCodesStartIndex = 2;

  inline void set_next_link(DependentCode next);
  inline void set_count(int value);
  inline void set_object_at(int i, MaybeObject object);
  inline void clear_at(int i);
  inline void copy(int from, int to);

  inline int flags();
  inline void set_flags(int flags);
  class GroupField : public BitField<int, 0, 3> {};
  class CountField : public BitField<int, 3, 27> {};
  STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);

  OBJECT_CONSTRUCTORS(DependentCode, WeakFixedArray)
};
702 :
// BytecodeArray represents a sequence of interpreter bytecodes.
class BytecodeArray : public FixedArrayBase {
 public:
  enum Age {
    kNoAgeBytecodeAge = 0,
    kQuadragenarianBytecodeAge,
    kQuinquagenarianBytecodeAge,
    kSexagenarianBytecodeAge,
    kSeptuagenarianBytecodeAge,
    kOctogenarianBytecodeAge,
    kAfterLastBytecodeAge,
    kFirstBytecodeAge = kNoAgeBytecodeAge,
    kLastBytecodeAge = kAfterLastBytecodeAge - 1,
    kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
    kIsOldBytecodeAge = kSexagenarianBytecodeAge
  };

  // Returns the allocation size for a BytecodeArray with {length} bytecodes.
  static constexpr int SizeFor(int length) {
    return OBJECT_POINTER_ALIGN(kHeaderSize + length);
  }

  // Setter and getter
  inline byte get(int index);
  inline void set(int index, byte value);

  // Returns data start address.
  inline Address GetFirstBytecodeAddress();

  // Accessors for frame size.
  inline int frame_size() const;
  inline void set_frame_size(int frame_size);

  // Accessor for register count (derived from frame_size).
  inline int register_count() const;

  // Accessors for parameter count (including implicit 'this' receiver).
  inline int parameter_count() const;
  inline void set_parameter_count(int number_of_parameters);

  // Register used to pass the incoming new.target or generator object from
  // the function call.
  inline interpreter::Register incoming_new_target_or_generator_register()
      const;
  inline void set_incoming_new_target_or_generator_register(
      interpreter::Register incoming_new_target_or_generator_register);

  // Accessors for profiling count.
  inline int interrupt_budget() const;
  inline void set_interrupt_budget(int interrupt_budget);

  // Accessors for OSR loop nesting level.
  inline int osr_loop_nesting_level() const;
  inline void set_osr_loop_nesting_level(int depth);

  // Accessors for bytecode's code age.
  inline Age bytecode_age() const;
  inline void set_bytecode_age(Age age);

  // Accessors for the constant pool.
  DECL_ACCESSORS(constant_pool, FixedArray)

  // Accessors for handler table containing offsets of exception handlers.
  DECL_ACCESSORS(handler_table, ByteArray)

  // Accessors for source position table containing mappings between byte code
  // offset and source position or SourcePositionTableWithFrameCache.
  DECL_ACCESSORS(source_position_table, Object)

  inline ByteArray SourcePositionTable();
  inline void ClearFrameCacheFromSourcePositionTable();

  DECL_CAST(BytecodeArray)

  // Dispatched behavior.
  inline int BytecodeArraySize();

  inline int raw_instruction_size();

  // Returns the size of bytecode and its metadata. This includes the size of
  // bytecode, constant pool, source position table, and handler table.
  inline int SizeIncludingMetadata();

  int SourcePosition(int offset);
  int SourceStatementPosition(int offset);

  DECL_PRINTER(BytecodeArray)
  DECL_VERIFIER(BytecodeArray)

  void Disassemble(std::ostream& os);

  void CopyBytecodesTo(BytecodeArray to);

  // Bytecode aging
  bool IsOld() const;
  void MakeOlder();

  // Clear uninitialized padding space. This ensures that the snapshot content
  // is deterministic.
  inline void clear_padding();

  // Layout description.
#define BYTECODE_ARRAY_FIELDS(V)                           \
  /* Pointer fields. */                                    \
  V(kConstantPoolOffset, kTaggedSize)                      \
  V(kHandlerTableOffset, kTaggedSize)                      \
  V(kSourcePositionTableOffset, kTaggedSize)               \
  V(kFrameSizeOffset, kIntSize)                            \
  V(kParameterSizeOffset, kIntSize)                        \
  V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
  V(kInterruptBudgetOffset, kIntSize)                      \
  V(kOSRNestingLevelOffset, kCharSize)                     \
  V(kBytecodeAgeOffset, kCharSize)                         \
  /* Total size. */                                        \
  V(kHeaderSize, 0)

  DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
                                BYTECODE_ARRAY_FIELDS)
#undef BYTECODE_ARRAY_FIELDS

  // Maximal memory consumption for a single BytecodeArray.
  static const int kMaxSize = 512 * MB;
  // Maximal length of a single BytecodeArray.
  static const int kMaxLength = kMaxSize - kHeaderSize;

  class BodyDescriptor;

  OBJECT_CONSTRUCTORS(BytecodeArray, FixedArrayBase);
};
831 :
832 : // DeoptimizationData is a fixed array used to hold the deoptimization data for
833 : // optimized code. It also contains information about functions that were
834 : // inlined. If N different functions were inlined then the first N elements of
835 : // the literal array will contain these functions.
836 : //
837 : // It can be empty.
838 : class DeoptimizationData : public FixedArray {
839 : public:
840 : // Layout description. Indices in the array.
841 : static const int kTranslationByteArrayIndex = 0;
842 : static const int kInlinedFunctionCountIndex = 1;
843 : static const int kLiteralArrayIndex = 2;
844 : static const int kOsrBytecodeOffsetIndex = 3;
845 : static const int kOsrPcOffsetIndex = 4;
846 : static const int kOptimizationIdIndex = 5;
847 : static const int kSharedFunctionInfoIndex = 6;
848 : static const int kInliningPositionsIndex = 7;
849 : static const int kFirstDeoptEntryIndex = 8;
850 :
851 : // Offsets of deopt entry elements relative to the start of the entry.
852 : static const int kBytecodeOffsetRawOffset = 0;
853 : static const int kTranslationIndexOffset = 1;
854 : static const int kPcOffset = 2;
855 : static const int kDeoptEntrySize = 3;
856 :
857 : // Simple element accessors.
858 : #define DECL_ELEMENT_ACCESSORS(name, type) \
859 : inline type name() const; \
860 : inline void Set##name(type value);
861 :
862 : DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
863 : DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
864 : DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
865 : DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
866 : DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
867 : DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
868 : DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
869 : DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
870 :
871 : #undef DECL_ELEMENT_ACCESSORS
872 :
873 : // Accessors for elements of the ith deoptimization entry.
874 : #define DECL_ENTRY_ACCESSORS(name, type) \
875 : inline type name(int i) const; \
876 : inline void Set##name(int i, type value);
877 :
878 : DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
879 : DECL_ENTRY_ACCESSORS(TranslationIndex, Smi)
880 : DECL_ENTRY_ACCESSORS(Pc, Smi)
881 :
882 : #undef DECL_ENTRY_ACCESSORS
883 :
884 : inline BailoutId BytecodeOffset(int i);
885 :
886 : inline void SetBytecodeOffset(int i, BailoutId value);
887 :
888 : inline int DeoptCount();
889 :
890 : static const int kNotInlinedIndex = -1;
891 :
892 : // Returns the inlined function at the given position in LiteralArray, or the
893 : // outer function if index == kNotInlinedIndex.
894 : class SharedFunctionInfo GetInlinedFunction(int index);
895 :
896 : // Allocates a DeoptimizationData.
897 : static Handle<DeoptimizationData> New(Isolate* isolate, int deopt_entry_count,
898 : PretenureFlag pretenure);
899 :
900 : // Return an empty DeoptimizationData.
901 : static Handle<DeoptimizationData> Empty(Isolate* isolate);
902 :
903 : DECL_CAST(DeoptimizationData)
904 :
905 : #ifdef ENABLE_DISASSEMBLER
906 : void DeoptimizationDataPrint(std::ostream& os); // NOLINT
907 : #endif
908 :
909 : private:
910 : static int IndexForEntry(int i) {
911 10343271 : return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
912 : }
913 :
914 : static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
915 :
916 4903 : OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray)
917 : };
918 :
// A Tuple2 pairing a source position table (ByteArray) with a
// SimpleNumberDictionary serving as a stack frame cache. A BytecodeArray's
// source_position_table slot may hold one of these instead of a bare
// ByteArray (see BytecodeArray::ClearFrameCacheFromSourcePositionTable).
class SourcePositionTableWithFrameCache : public Tuple2 {
 public:
  // The underlying source position table.
  DECL_ACCESSORS(source_position_table, ByteArray)
  // Cached frame information; presumably keyed by position/offset numbers —
  // NOTE(review): key semantics not visible here, confirm against callers.
  DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)

  DECL_CAST(SourcePositionTableWithFrameCache)

  // Layout description: two tagged pointer fields laid out after the Struct
  // header.
#define SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS(V) \
  V(kSourcePositionTableIndex, kTaggedSize)        \
  V(kStackFrameCacheIndex, kTaggedSize)            \
  /* Total size. */                                \
  V(kSize, 0)

  DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
                                SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS)
#undef SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS

  OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Tuple2);
};
939 :
940 : } // namespace internal
941 : } // namespace v8
942 :
943 : #include "src/objects/object-macros-undef.h"
944 :
945 : #endif // V8_OBJECTS_CODE_H_
|