Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_OBJECTS_CODE_H_
6 : #define V8_OBJECTS_CODE_H_
7 :
8 : #include "src/objects.h"
9 :
10 : // Has to be the last include (doesn't have include guards):
11 : #include "src/objects/object-macros.h"
12 :
13 : namespace v8 {
14 : namespace internal {
15 :
16 : class ByteArray;
17 : class BytecodeArray;
18 :
19 : // HandlerTable is a fixed array containing entries for exception handlers in
20 : // the code object it is associated with. The table comes in two flavors:
21 : // 1) Based on ranges: Used for unoptimized code. Contains one entry per
22 : // exception handler and a range representing the try-block covered by that
23 : // handler. Layout looks as follows:
24 : // [ range-start , range-end , handler-offset , handler-data ]
25 : // 2) Based on return addresses: Used for turbofanned code. Contains one entry
26 : // per call-site that could throw an exception. Layout looks as follows:
27 : // [ return-address-offset , handler-offset ]
28 : class HandlerTable : public FixedArray {
29 : public:
30 : // Conservative prediction whether a given handler will locally catch an
31 : // exception or cause a re-throw to outside the code boundary. Since this is
32 : // undecidable it is merely an approximation (e.g. useful for debugger).
33 : enum CatchPrediction {
34 : UNCAUGHT, // The handler will (likely) rethrow the exception.
35 : CAUGHT, // The exception will be caught by the handler.
36 : PROMISE, // The exception will be caught and cause a promise rejection.
37 : DESUGARING, // The exception will be caught, but both the exception and the
38 : // catching are part of a desugaring and should therefore not
39 : // be visible to the user (we won't notify the debugger of such
40 : // exceptions).
41 : ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
42 : // in the desugaring of an async function, so special
43 : // async/await handling in the debugger can take place.
44 : };
45 :
46 : // Getters for handler table based on ranges.
47 : inline int GetRangeStart(int index) const;
48 : inline int GetRangeEnd(int index) const;
49 : inline int GetRangeHandler(int index) const;
50 : inline int GetRangeData(int index) const;
51 :
52 : // Setters for handler table based on ranges.
53 : inline void SetRangeStart(int index, int value);
54 : inline void SetRangeEnd(int index, int value);
55 : inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
56 : inline void SetRangeData(int index, int value);
57 :
58 : // Setters for handler table based on return addresses.
59 : inline void SetReturnOffset(int index, int value);
60 : inline void SetReturnHandler(int index, int offset);
61 :
62 : // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
63 : // the start of the potentially throwing instruction (using return addresses
64 : // for this value would be invalid).
65 : int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
66 :
67 : // Lookup handler in a table based on return addresses.
68 : int LookupReturn(int pc_offset);
69 :
70 : // Returns the number of entries in the table.
71 : inline int NumberOfRangeEntries() const;
72 :
73 : // Returns the required length of the underlying fixed array.
74 2153872 : static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
75 12092 : static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
76 :
77 : // Returns an empty handler table.
78 : static Handle<HandlerTable> Empty(Isolate* isolate);
79 :
80 : DECL_CAST(HandlerTable)
81 :
82 : #ifdef ENABLE_DISASSEMBLER
83 : void HandlerTableRangePrint(std::ostream& os); // NOLINT
84 : void HandlerTableReturnPrint(std::ostream& os); // NOLINT
85 : #endif
86 :
87 : private:
88 : // Layout description for handler table based on ranges.
89 : static const int kRangeStartIndex = 0;
90 : static const int kRangeEndIndex = 1;
91 : static const int kRangeHandlerIndex = 2;
92 : static const int kRangeDataIndex = 3;
93 : static const int kRangeEntrySize = 4; // Array slots per range-based entry.
94 :
95 : // Layout description for handler table based on return addresses.
96 : static const int kReturnOffsetIndex = 0;
97 : static const int kReturnHandlerIndex = 1;
98 : static const int kReturnEntrySize = 2; // Array slots per return-based entry.
99 :
100 : // Encoding of the {handler} field.
101 : class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
102 : class HandlerOffsetField : public BitField<int, 3, 29> {}; // Handler offset limited to 29 bits.
103 : };
104 :
105 : // Code describes objects with on-the-fly generated machine code.
106 : class Code : public HeapObject {
107 : public:
108 : // Opaque data type for encapsulating code flags like kind, inline
109 : // cache state, and arguments count.
110 : typedef uint32_t Flags;
111 :
112 : #define CODE_KIND_LIST(V) \
113 : V(OPTIMIZED_FUNCTION) \
114 : V(BYTECODE_HANDLER) \
115 : V(STUB) \
116 : V(BUILTIN) \
117 : V(REGEXP) \
118 : V(WASM_FUNCTION) \
119 : V(WASM_TO_JS_FUNCTION) \
120 : V(JS_TO_WASM_FUNCTION) \
121 : V(WASM_INTERPRETER_ENTRY) \
122 : V(C_WASM_ENTRY)
123 :
124 : enum Kind {
125 : #define DEFINE_CODE_KIND_ENUM(name) name,
126 : CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
127 : #undef DEFINE_CODE_KIND_ENUM
128 : NUMBER_OF_KINDS
129 : };
130 :
131 : static const char* Kind2String(Kind kind);
132 :
133 : #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
134 : // Printing
135 : static const char* ICState2String(InlineCacheState state);
136 : #endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
137 :
138 : #ifdef ENABLE_DISASSEMBLER
139 : void Disassemble(const char* name, std::ostream& os); // NOLINT
140 : #endif
141 :
142 : // [instruction_size]: Size of the native instructions
143 : inline int instruction_size() const;
144 : inline void set_instruction_size(int value);
145 :
146 : // [relocation_info]: Code relocation information
147 : DECL_ACCESSORS(relocation_info, ByteArray)
148 : void InvalidateRelocation();
149 : void InvalidateEmbeddedObjects();
150 :
151 : // [handler_table]: Fixed array containing offsets of exception handlers.
152 : DECL_ACCESSORS(handler_table, FixedArray)
153 :
154 : // [deoptimization_data]: Array containing data for deopt.
155 : DECL_ACCESSORS(deoptimization_data, FixedArray)
156 :
157 : // [source_position_table]: ByteArray for the source positions table or
158 : // SourcePositionTableWithFrameCache.
159 : DECL_ACCESSORS(source_position_table, Object)
160 :
161 : inline ByteArray* SourcePositionTable() const;
162 :
163 : // [trap_handler_index]: An index into the trap handler's master list of code
164 : // objects.
165 : DECL_ACCESSORS(trap_handler_index, Smi)
166 :
167 : // [raw_type_feedback_info]: This field stores various things, depending on
168 : // the kind of the code object.
169 : // STUB and ICs => major/minor key as Smi.
170 : // TODO(mvstanton): rename raw_type_feedback_info to stub_key, since the
171 : // field is no longer overloaded.
172 : DECL_ACCESSORS(raw_type_feedback_info, Object)
173 : inline uint32_t stub_key() const;
174 : inline void set_stub_key(uint32_t key);
175 :
176 : // [next_code_link]: Link for lists of optimized or deoptimized code.
177 : // Note that storage for this field is overlapped with typefeedback_info.
178 : DECL_ACCESSORS(next_code_link, Object)
179 :
180 : // [constant_pool offset]: Offset of the constant pool.
181 : // Valid for FLAG_enable_embedded_constant_pool only
182 : inline int constant_pool_offset() const;
183 : inline void set_constant_pool_offset(int offset);
184 :
185 : // Unchecked accessors to be used during GC.
186 : inline ByteArray* unchecked_relocation_info() const;
187 :
188 : inline int relocation_size() const;
189 :
190 : // [kind]: Access to specific code kind.
191 : inline Kind kind() const;
192 : inline void set_kind(Kind kind);
193 :
194 : inline bool is_stub() const;
195 : inline bool is_optimized_code() const;
196 : inline bool is_wasm_code() const;
197 :
198 : inline void set_raw_kind_specific_flags1(int value);
199 : inline void set_raw_kind_specific_flags2(int value);
200 :
201 : // Testers for interpreter builtins.
202 : inline bool is_interpreter_trampoline_builtin() const;
203 :
204 : // Tells whether the code checks the optimization marker in the function's
205 : // feedback vector.
206 : inline bool checks_optimization_marker() const;
207 :
208 : // [has_tagged_params]: For compiled code or builtins: Tells whether the
209 : // outgoing parameters of this code are tagged pointers. True for other kinds.
210 : inline bool has_tagged_params() const;
211 : inline void set_has_tagged_params(bool value);
212 :
213 : // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
214 : // code object was generated by the TurboFan optimizing compiler.
215 : inline bool is_turbofanned() const;
216 : inline void set_is_turbofanned(bool value);
217 :
218 : // [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the
219 : // embedded objects in code should be treated weakly.
220 : inline bool can_have_weak_objects() const;
221 : inline void set_can_have_weak_objects(bool value);
222 :
223 : // [is_construct_stub]: For kind BUILTIN, tells whether the code object
224 : // represents a hand-written construct stub
225 : // (e.g., NumberConstructor_ConstructStub).
226 : inline bool is_construct_stub() const;
227 : inline void set_is_construct_stub(bool value);
228 :
229 : // [builtin_index]: For builtins, tells which builtin index the code object
230 : // has. The builtin index is a non-negative integer for builtins, and -1
231 : // otherwise.
232 : inline int builtin_index() const;
233 : inline void set_builtin_index(int id);
234 : inline bool is_builtin() const;
235 :
236 : // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
237 : // reserved in the code prologue.
238 : inline unsigned stack_slots() const;
239 : inline void set_stack_slots(unsigned slots);
240 :
241 : // [safepoint_table_offset]: For kind OPTIMIZED_FUNCTION, the offset in
242 : // the instruction stream where the safepoint table starts.
243 : inline unsigned safepoint_table_offset() const;
244 : inline void set_safepoint_table_offset(unsigned offset);
245 :
246 : // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
247 : // the code is going to be deoptimized because of dead embedded maps.
248 : inline bool marked_for_deoptimization() const;
249 : inline void set_marked_for_deoptimization(bool flag);
250 :
251 : // [deopt_already_counted]: For kind OPTIMIZED_FUNCTION tells whether
252 : // the code was already deoptimized.
253 : inline bool deopt_already_counted() const;
254 : inline void set_deopt_already_counted(bool flag);
255 :
256 : // [is_promise_rejection]: For kind BUILTIN tells whether the
257 : // exception thrown by the code will lead to promise rejection or
258 : // uncaught if both this and is_exception_caught is set.
259 : // Use GetBuiltinCatchPrediction to access this.
260 : inline void set_is_promise_rejection(bool flag);
261 :
262 : // [is_exception_caught]: For kind BUILTIN tells whether the
263 : // exception thrown by the code will be caught internally or
264 : // uncaught if both this and is_promise_rejection is set.
265 : // Use GetBuiltinCatchPrediction to access this.
266 : inline void set_is_exception_caught(bool flag);
267 :
268 : // [constant_pool]: The constant pool for this function.
269 : inline Address constant_pool();
270 :
271 : // Get the safepoint entry for the given pc.
272 : SafepointEntry GetSafepointEntry(Address pc);
273 :
274 : // The entire code object including its header is copied verbatim to the
275 : // snapshot so that it can be written in one, fast, memcpy during
276 : // deserialization. The deserializer will overwrite some pointers, rather
277 : // like a runtime linker, but the random allocation addresses used in the
278 : // mksnapshot process would still be present in the unlinked snapshot data,
279 : // which would make snapshot production non-reproducible. This method wipes
280 : // out the to-be-overwritten header data for reproducible snapshots.
281 : inline void WipeOutHeader();
282 :
283 : // Clear uninitialized padding space. This ensures that the snapshot content
284 : // is deterministic.
285 : inline void clear_padding();
286 : // Initialize the flags field. Similar to clear_padding above this ensures that
287 : // the snapshot content is deterministic.
288 : inline void initialize_flags(Kind kind);
289 :
290 : // Convert a target address into a code object.
291 : static inline Code* GetCodeFromTargetAddress(Address address);
292 :
293 : // Convert an entry address into an object.
294 : static inline Object* GetObjectFromEntryAddress(Address location_of_address);
295 :
296 : // Convert a code entry into an object.
297 : static inline Object* GetObjectFromCodeEntry(Address code_entry);
298 :
299 : // Returns the address of the first instruction.
300 : inline byte* instruction_start() const;
301 :
302 : // Returns the address right after the last instruction.
303 : inline byte* instruction_end() const;
304 :
305 : // Returns the size of the instructions, padding, relocation and unwinding
306 : // information.
307 : inline int body_size() const;
308 :
309 : // Returns the size of code and its metadata. This includes the size of code
310 : // relocation information, deoptimization data and handler table.
311 : inline int SizeIncludingMetadata() const;
312 :
313 : // Returns the address of the first relocation info (read backwards!).
314 : inline byte* relocation_start() const;
315 :
316 : // [has_unwinding_info]: Whether this code object has unwinding information.
317 : // If it doesn't, unwinding_information_start() will point to invalid data.
318 : //
319 : // The body of all code objects has the following layout.
320 : //
321 : // +--------------------------+ <-- instruction_start()
322 : // | instructions |
323 : // | ... |
324 : // +--------------------------+
325 : // | relocation info |
326 : // | ... |
327 : // +--------------------------+ <-- instruction_end()
328 : //
329 : // If has_unwinding_info() is false, instruction_end() points to the first
330 : // memory location after the end of the code object. Otherwise, the body
331 : // continues as follows:
332 : //
333 : // +--------------------------+
334 : // | padding to the next |
335 : // | 8-byte aligned address |
336 : // +--------------------------+ <-- instruction_end()
337 : // | [unwinding_info_size] |
338 : // | as uint64_t |
339 : // +--------------------------+ <-- unwinding_info_start()
340 : // | unwinding info |
341 : // | ... |
342 : // +--------------------------+ <-- unwinding_info_end()
343 : //
344 : // and unwinding_info_end() points to the first memory location after the end
345 : // of the code object.
346 : //
347 : DECL_BOOLEAN_ACCESSORS(has_unwinding_info)
348 :
349 : // [unwinding_info_size]: Size of the unwinding information.
350 : inline int unwinding_info_size() const;
351 : inline void set_unwinding_info_size(int value);
352 :
353 : // Returns the address of the unwinding information, if any.
354 : inline byte* unwinding_info_start() const;
355 :
356 : // Returns the address right after the end of the unwinding information.
357 : inline byte* unwinding_info_end() const;
358 :
359 : // Code entry point.
360 : inline byte* entry() const;
361 :
362 : // Returns true if pc is inside this object's instructions.
363 : inline bool contains(byte* pc);
364 :
365 : // Relocate the code by delta bytes. Called to signal that this code
366 : // object has been moved by delta bytes.
367 : void Relocate(intptr_t delta);
368 :
369 : // Migrate code described by desc.
370 : void CopyFrom(const CodeDesc& desc);
371 :
372 : // Returns the object size for a given body (used for allocation).
373 : static int SizeFor(int body_size) {
374 : DCHECK_SIZE_TAG_ALIGNED(body_size);
375 255869058 : return RoundUp(kHeaderSize + body_size, kCodeAlignment);
376 : }
377 :
378 : // Calculate the size of the code object to report for log events. This takes
379 : // the layout of the code object into account.
380 : inline int ExecutableSize() const;
381 :
382 : DECL_CAST(Code)
383 :
384 : // Dispatched behavior.
385 : inline int CodeSize() const;
386 :
387 : DECL_PRINTER(Code)
388 : DECL_VERIFIER(Code)
389 :
390 : void PrintDeoptLocation(FILE* out, Address pc);
391 : bool CanDeoptAt(Address pc);
392 :
393 : inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
394 : #ifdef VERIFY_HEAP
395 : void VerifyEmbeddedObjectsDependency();
396 : #endif
397 :
398 : #ifdef DEBUG
399 : enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
400 : void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
401 : #endif // DEBUG
402 :
403 : inline bool CanContainWeakObjects();
404 :
405 : inline bool IsWeakObject(Object* object);
406 :
407 : static inline bool IsWeakObjectInOptimizedCode(Object* object);
408 :
409 : static Handle<WeakCell> WeakCellFor(Handle<Code> code);
410 : WeakCell* CachedWeakCell();
411 :
412 : // Return true if the function is inlined in the code.
413 : bool Inlines(SharedFunctionInfo* sfi);
414 :
415 : class OptimizedCodeIterator {
416 : public:
417 : explicit OptimizedCodeIterator(Isolate* isolate);
418 : Code* Next();
419 :
420 : private:
421 : Context* next_context_;
422 : Code* current_code_;
423 : Isolate* isolate_;
424 :
425 : DisallowHeapAllocation no_gc;
426 : DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator)
427 : };
428 :
429 : static const int kConstantPoolSize =
430 : FLAG_enable_embedded_constant_pool ? kIntSize : 0;
431 :
432 : // Layout description.
433 : static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
434 : static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
435 : static const int kDeoptimizationDataOffset =
436 : kHandlerTableOffset + kPointerSize;
437 : static const int kSourcePositionTableOffset =
438 : kDeoptimizationDataOffset + kPointerSize;
439 : // For FUNCTION kind, we store the type feedback info here.
440 : static const int kTypeFeedbackInfoOffset =
441 : kSourcePositionTableOffset + kPointerSize;
442 : static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
443 : static const int kInstructionSizeOffset = kNextCodeLinkOffset + kPointerSize;
444 : static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
445 : static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
446 : static const int kKindSpecificFlags2Offset =
447 : kKindSpecificFlags1Offset + kIntSize;
448 : static const int kConstantPoolOffset = kKindSpecificFlags2Offset + kIntSize;
449 : static const int kBuiltinIndexOffset =
450 : kConstantPoolOffset + kConstantPoolSize;
451 : static const int kTrapHandlerIndex = kBuiltinIndexOffset + kIntSize;
452 : static const int kHeaderPaddingStart = kTrapHandlerIndex + kPointerSize;
453 :
454 : // Add padding to align the instruction start following right after
455 : // the Code object header.
456 : static const int kHeaderSize =
457 : (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
458 :
459 : // Data or code not directly visited by GC starts here.
460 : // The serializer needs to copy bytes starting from here verbatim.
461 : // Objects embedded into code are visited via reloc info.
462 : static const int kDataStart = kInstructionSizeOffset;
463 :
464 : inline int GetUnwindingInfoSizeOffset() const;
465 :
466 : class BodyDescriptor;
467 :
468 : // Flags layout. BitField<type, shift, size>.
469 : class HasUnwindingInfoField : public BitField<bool, 0, 1> {};
470 : class KindField : public BitField<Kind, HasUnwindingInfoField::kNext, 5> {};
471 : STATIC_ASSERT(NUMBER_OF_KINDS <= KindField::kMax);
472 :
473 : // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
474 : static const int kStackSlotsFirstBit = 0;
475 : static const int kStackSlotsBitCount = 24;
476 : static const int kMarkedForDeoptimizationBit =
477 : kStackSlotsFirstBit + kStackSlotsBitCount;
478 : static const int kDeoptAlreadyCountedBit = kMarkedForDeoptimizationBit + 1;
479 : static const int kIsTurbofannedBit = kDeoptAlreadyCountedBit + 1;
480 : static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1;
481 : // Could be moved to overlap previous bits when we need more space.
482 : static const int kIsConstructStub = kCanHaveWeakObjects + 1;
483 : static const int kIsPromiseRejection = kIsConstructStub + 1;
484 : static const int kIsExceptionCaught = kIsPromiseRejection + 1;
485 :
486 : STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
487 : STATIC_ASSERT(kIsExceptionCaught + 1 <= 32);
488 :
489 : class StackSlotsField
490 : : public BitField<int, kStackSlotsFirstBit, kStackSlotsBitCount> {
491 : }; // NOLINT
492 : class MarkedForDeoptimizationField
493 : : public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT
494 : class DeoptAlreadyCountedField
495 : : public BitField<bool, kDeoptAlreadyCountedBit, 1> {}; // NOLINT
496 : class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> {
497 : }; // NOLINT
498 : class CanHaveWeakObjectsField
499 : : public BitField<bool, kCanHaveWeakObjects, 1> {}; // NOLINT
500 : class IsConstructStubField : public BitField<bool, kIsConstructStub, 1> {
501 : }; // NOLINT
502 : class IsPromiseRejectionField
503 : : public BitField<bool, kIsPromiseRejection, 1> {}; // NOLINT
504 : class IsExceptionCaughtField : public BitField<bool, kIsExceptionCaught, 1> {
505 : }; // NOLINT
506 :
507 : // KindSpecificFlags2 layout (ALL)
508 : static const int kHasTaggedStackBit = 0;
509 : class HasTaggedStackField : public BitField<bool, kHasTaggedStackBit, 1> {};
510 :
511 : // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
512 : static const int kSafepointTableOffsetFirstBit = kHasTaggedStackBit + 1;
513 : static const int kSafepointTableOffsetBitCount = 30;
514 :
515 : STATIC_ASSERT(kSafepointTableOffsetFirstBit + kSafepointTableOffsetBitCount <=
516 : 32);
517 : STATIC_ASSERT(1 + kSafepointTableOffsetBitCount <= 32);
518 :
519 : class SafepointTableOffsetField
520 : : public BitField<int, kSafepointTableOffsetFirstBit,
521 : kSafepointTableOffsetBitCount> {}; // NOLINT
522 :
523 : static const int kArgumentsBits = 16;
524 : static const int kMaxArguments = (1 << kArgumentsBits) - 1;
525 :
526 : private:
527 : friend class RelocIterator;
528 :
529 : bool is_promise_rejection() const;
530 : bool is_exception_caught() const;
531 :
532 : DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
533 : };
534 :
535 : class AbstractCode : public HeapObject {
536 : public:
537 : // All code kinds and INTERPRETED_FUNCTION.
538 : enum Kind {
539 : #define DEFINE_CODE_KIND_ENUM(name) name,
540 : CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
541 : #undef DEFINE_CODE_KIND_ENUM
542 : INTERPRETED_FUNCTION,
543 : NUMBER_OF_KINDS
544 : };
545 :
546 : static const char* Kind2String(Kind kind);
547 :
548 : int SourcePosition(int offset);
549 : int SourceStatementPosition(int offset);
550 :
551 : // Returns the address of the first instruction.
552 : inline Address instruction_start();
553 :
554 : // Returns the address right after the last instruction.
555 : inline Address instruction_end();
556 :
557 : // Returns the size of the code instructions.
558 : inline int instruction_size();
559 :
560 : // Return the source position table.
561 : inline ByteArray* source_position_table();
562 :
563 : inline Object* stack_frame_cache();
564 : static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
565 : Handle<UnseededNumberDictionary> cache);
566 : void DropStackFrameCache();
567 :
568 : // Returns the size of instructions and the metadata.
569 : inline int SizeIncludingMetadata();
570 :
571 : // Returns true if pc is inside this object's instructions.
572 : inline bool contains(byte* pc);
573 :
574 : // Returns the AbstractCode::Kind of the code.
575 : inline Kind kind();
576 :
577 : // Calculate the size of the code object to report for log events. This takes
578 : // the layout of the code object into account.
579 : inline int ExecutableSize();
580 :
581 : DECL_CAST(AbstractCode)
582 : inline Code* GetCode();
583 : inline BytecodeArray* GetBytecodeArray();
584 :
585 : // Max loop nesting marker used to postpone OSR. We don't take loop
586 : // nesting that is deeper than 5 levels into account.
587 : static const int kMaxLoopNestingMarker = 6;
588 : };
589 :
590 : // Dependent code is a singly linked list of fixed arrays. Each array contains
591 : // code objects in weak cells for one dependent group. The suffix of the array
592 : // can be filled with the undefined value if the number of codes is less than
593 : // the length of the array.
594 : //
595 : // +------+-----------------+--------+--------+-----+--------+-----------+-----+
596 : // | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
597 : // +------+-----------------+--------+--------+-----+--------+-----------+-----+
598 : // |
599 : // V
600 : // +------+-----------------+--------+--------+-----+--------+-----------+-----+
601 : // | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... |
602 : // +------+-----------------+--------+--------+-----+--------+-----------+-----+
603 : // |
604 : // V
605 : // empty_fixed_array()
606 : //
607 : // The list of fixed arrays is ordered by dependency groups.
608 :
609 : class DependentCode : public FixedArray {
610 : public:
611 : enum DependencyGroup {
612 : // Group of code that weakly embed this map and depend on being
613 : // deoptimized when the map is garbage collected.
614 : kWeakCodeGroup,
615 : // Group of code that embed a transition to this map, and depend on being
616 : // deoptimized when the transition is replaced by a new version.
617 : kTransitionGroup,
618 : // Group of code that omit run-time prototype checks for prototypes
619 : // described by this map. The group is deoptimized whenever an object
620 : // described by this map changes shape (and transitions to a new map),
621 : // possibly invalidating the assumptions embedded in the code.
622 : kPrototypeCheckGroup,
623 : // Group of code that depends on global property values in property cells
624 : // not being changed.
625 : kPropertyCellChangedGroup,
626 : // Group of code that omit run-time checks for field(s) introduced by
627 : // this map, i.e. for the field type.
628 : kFieldOwnerGroup,
629 : // Group of code that omit run-time type checks for initial maps of
630 : // constructors.
631 : kInitialMapChangedGroup,
632 : // Group of code that depends on tenuring information in AllocationSites
633 : // not being changed.
634 : kAllocationSiteTenuringChangedGroup,
635 : // Group of code that depends on element transition information in
636 : // AllocationSites not being changed.
637 : kAllocationSiteTransitionChangedGroup
638 : };
639 :
640 : static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
641 : static const int kNextLinkIndex = 0;
642 : static const int kFlagsIndex = 1;
643 : static const int kCodesStartIndex = 2;
644 :
645 : bool Contains(DependencyGroup group, WeakCell* code_cell);
646 : bool IsEmpty(DependencyGroup group);
647 :
648 : static Handle<DependentCode> InsertCompilationDependencies(
649 : Handle<DependentCode> entries, DependencyGroup group,
650 : Handle<Foreign> info);
651 :
652 : static Handle<DependentCode> InsertWeakCode(Handle<DependentCode> entries,
653 : DependencyGroup group,
654 : Handle<WeakCell> code_cell);
655 :
656 : void UpdateToFinishedCode(DependencyGroup group, Foreign* info,
657 : WeakCell* code_cell);
658 :
659 : void RemoveCompilationDependencies(DependentCode::DependencyGroup group,
660 : Foreign* info);
661 :
662 : void DeoptimizeDependentCodeGroup(Isolate* isolate,
663 : DependentCode::DependencyGroup group);
664 :
665 : bool MarkCodeForDeoptimization(Isolate* isolate,
666 : DependentCode::DependencyGroup group);
667 :
668 : // The following low-level accessors should only be used by this class
669 : // and the mark compact collector.
670 : inline DependentCode* next_link();
671 : inline void set_next_link(DependentCode* next);
672 : inline int count();
673 : inline void set_count(int value);
674 : inline DependencyGroup group();
675 : inline void set_group(DependencyGroup group);
676 : inline Object* object_at(int i);
677 : inline void set_object_at(int i, Object* object);
678 : inline void clear_at(int i);
679 : inline void copy(int from, int to);
680 : DECL_CAST(DependentCode)
681 :
682 : static const char* DependencyGroupName(DependencyGroup group);
683 : static void SetMarkedForDeoptimization(Code* code, DependencyGroup group);
684 :
685 : private:
686 : static Handle<DependentCode> Insert(Handle<DependentCode> entries,
687 : DependencyGroup group,
688 : Handle<Object> object);
689 : static Handle<DependentCode> New(DependencyGroup group, Handle<Object> object,
690 : Handle<DependentCode> next);
691 : static Handle<DependentCode> EnsureSpace(Handle<DependentCode> entries);
692 : // Compact by removing cleared weak cells and return true if there was
693 : // any cleared weak cell.
694 : bool Compact();
695 : static int Grow(int number_of_entries) { // New capacity: +1 while below 5 entries, then grow by 25%.
696 154498 : if (number_of_entries < 5) return number_of_entries + 1;
697 52356 : return number_of_entries * 5 / 4;
698 : }
699 : inline int flags();
700 : inline void set_flags(int flags);
701 : class GroupField : public BitField<int, 0, 3> {};
702 : class CountField : public BitField<int, 3, 27> {};
703 : STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
704 : };
705 :
706 : // BytecodeArray represents a sequence of interpreter bytecodes.
707 : class BytecodeArray : public FixedArrayBase {
708 : public:
709 : enum Age {
710 : kNoAgeBytecodeAge = 0,
711 : kQuadragenarianBytecodeAge,
712 : kQuinquagenarianBytecodeAge,
713 : kSexagenarianBytecodeAge,
714 : kSeptuagenarianBytecodeAge,
715 : kOctogenarianBytecodeAge,
716 : kAfterLastBytecodeAge,
717 : kFirstBytecodeAge = kNoAgeBytecodeAge,
718 : kLastBytecodeAge = kAfterLastBytecodeAge - 1,
719 : kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
720 : kIsOldBytecodeAge = kSexagenarianBytecodeAge
721 : };
722 :
723 : static int SizeFor(int length) {
724 13665803 : return OBJECT_POINTER_ALIGN(kHeaderSize + length);
725 : }
726 :
727 : // Setter and getter
728 : inline byte get(int index);
729 : inline void set(int index, byte value);
730 :
731 : // Returns data start address.
732 : inline Address GetFirstBytecodeAddress();
733 :
734 : // Accessors for frame size.
735 : inline int frame_size() const;
736 : inline void set_frame_size(int frame_size);
737 :
738 : // Accessor for register count (derived from frame_size).
739 : inline int register_count() const;
740 :
741 : // Accessors for parameter count (including implicit 'this' receiver).
742 : inline int parameter_count() const;
743 : inline void set_parameter_count(int number_of_parameters);
744 :
745 : // Register used to pass the incoming new.target or generator object from the
746 : // fucntion call.
747 : inline interpreter::Register incoming_new_target_or_generator_register()
748 : const;
749 : inline void set_incoming_new_target_or_generator_register(
750 : interpreter::Register incoming_new_target_or_generator_register);
751 :
752 : // Accessors for profiling count.
753 : inline int interrupt_budget() const;
754 : inline void set_interrupt_budget(int interrupt_budget);
755 :
756 : // Accessors for OSR loop nesting level.
757 : inline int osr_loop_nesting_level() const;
758 : inline void set_osr_loop_nesting_level(int depth);
759 :
760 : // Accessors for bytecode's code age.
761 : inline Age bytecode_age() const;
762 : inline void set_bytecode_age(Age age);
763 :
764 : // Accessors for the constant pool.
765 : DECL_ACCESSORS(constant_pool, FixedArray)
766 :
767 : // Accessors for handler table containing offsets of exception handlers.
768 : DECL_ACCESSORS(handler_table, FixedArray)
769 :
770 : // Accessors for source position table containing mappings between byte code
771 : // offset and source position or SourcePositionTableWithFrameCache.
772 : DECL_ACCESSORS(source_position_table, Object)
773 :
774 : inline ByteArray* SourcePositionTable();
775 :
776 : DECL_CAST(BytecodeArray)
777 :
778 : // Dispatched behavior.
779 : inline int BytecodeArraySize();
780 :
781 : inline int instruction_size();
782 :
783 : // Returns the size of bytecode and its metadata. This includes the size of
784 : // bytecode, constant pool, source position table, and handler table.
785 : inline int SizeIncludingMetadata();
786 :
787 : int SourcePosition(int offset);
788 : int SourceStatementPosition(int offset);
789 :
790 : DECL_PRINTER(BytecodeArray)
791 : DECL_VERIFIER(BytecodeArray)
792 :
793 : void Disassemble(std::ostream& os);
794 :
795 : void CopyBytecodesTo(BytecodeArray* to);
796 :
797 : // Bytecode aging
798 : bool IsOld() const;
799 : void MakeOlder();
800 :
801 : // Clear uninitialized padding space. This ensures that the snapshot content
802 : // is deterministic.
803 : inline void clear_padding();
804 :
805 : // Layout description.
806 : #define BYTECODE_ARRAY_FIELDS(V) \
807 : /* Pointer fields. */ \
808 : V(kConstantPoolOffset, kPointerSize) \
809 : V(kHandlerTableOffset, kPointerSize) \
810 : V(kSourcePositionTableOffset, kPointerSize) \
811 : V(kFrameSizeOffset, kIntSize) \
812 : V(kParameterSizeOffset, kIntSize) \
813 : V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
814 : V(kInterruptBudgetOffset, kIntSize) \
815 : V(kOSRNestingLevelOffset, kCharSize) \
816 : V(kBytecodeAgeOffset, kCharSize) \
817 : /* Total size. */ \
818 : V(kHeaderSize, 0)
819 :
820 : DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
821 : BYTECODE_ARRAY_FIELDS)
822 : #undef BYTECODE_ARRAY_FIELDS
823 :
824 : // Maximal memory consumption for a single BytecodeArray.
825 : static const int kMaxSize = 512 * MB;
826 : // Maximal length of a single BytecodeArray.
827 : static const int kMaxLength = kMaxSize - kHeaderSize;
828 :
829 : class BodyDescriptor;
830 : // No weak fields.
831 : typedef BodyDescriptor BodyDescriptorWeak;
832 :
833 : private:
834 : DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
835 : };
836 :
837 : // DeoptimizationData is a fixed array used to hold the deoptimization data for
838 : // optimized code. It also contains information about functions that were
839 : // inlined. If N different functions were inlined then the first N elements of
840 : // the literal array will contain these functions.
841 : //
842 : // It can be empty.
class DeoptimizationData : public FixedArray {
 public:
  // Layout description. Indices in the array. The first kFirstDeoptEntryIndex
  // slots hold per-code metadata; the remainder holds the deopt entries.
  static const int kTranslationByteArrayIndex = 0;
  static const int kInlinedFunctionCountIndex = 1;
  static const int kLiteralArrayIndex = 2;
  static const int kOsrBytecodeOffsetIndex = 3;
  static const int kOsrPcOffsetIndex = 4;
  static const int kOptimizationIdIndex = 5;
  static const int kSharedFunctionInfoIndex = 6;
  static const int kWeakCellCacheIndex = 7;
  static const int kInliningPositionsIndex = 8;
  static const int kFirstDeoptEntryIndex = 9;

  // Offsets of deopt entry elements relative to the start of the entry.
  // Each entry occupies kDeoptEntrySize consecutive array slots.
  static const int kBytecodeOffsetRawOffset = 0;
  static const int kTranslationIndexOffset = 1;
  static const int kPcOffset = 2;
  static const int kDeoptEntrySize = 3;

  // Simple element accessors. Declares name()/Setname() pairs for the
  // fixed metadata slots above.
#define DECL_ELEMENT_ACCESSORS(name, type) \
  inline type* name();                     \
  inline void Set##name(type* value);

  DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
  DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
  DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
  DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
  DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
  DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
  DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
  DECL_ELEMENT_ACCESSORS(WeakCellCache, Object)
  DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)

#undef DECL_ELEMENT_ACCESSORS

  // Accessors for elements of the ith deoptimization entry.
#define DECL_ENTRY_ACCESSORS(name, type) \
  inline type* name(int i);              \
  inline void Set##name(int i, type* value);

  DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
  DECL_ENTRY_ACCESSORS(TranslationIndex, Smi)
  DECL_ENTRY_ACCESSORS(Pc, Smi)

#undef DECL_ENTRY_ACCESSORS

  // Typed wrappers around BytecodeOffsetRaw for the ith entry.
  inline BailoutId BytecodeOffset(int i);

  inline void SetBytecodeOffset(int i, BailoutId value);

  // Number of deopt entries stored after the fixed metadata slots.
  inline int DeoptCount();

  static const int kNotInlinedIndex = -1;

  // Returns the inlined function at the given position in LiteralArray, or the
  // outer function if index == kNotInlinedIndex.
  class SharedFunctionInfo* GetInlinedFunction(int index);

  // Allocates a DeoptimizationData.
  static Handle<DeoptimizationData> New(Isolate* isolate, int deopt_entry_count,
                                        PretenureFlag pretenure);

  // Return an empty DeoptimizationData.
  static Handle<DeoptimizationData> Empty(Isolate* isolate);

  DECL_CAST(DeoptimizationData)

#ifdef ENABLE_DISASSEMBLER
  void DeoptimizationDataPrint(std::ostream& os);  // NOLINT
#endif

 private:
  // Array index of the start of entry |i|: metadata slots first, then
  // fixed-size entries.
  static int IndexForEntry(int i) {
    return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
  }

  // Total array length needed to store |entry_count| entries.
  static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
};
923 :
924 : } // namespace internal
925 : } // namespace v8
926 :
927 : #include "src/objects/object-macros-undef.h"
928 :
929 : #endif // V8_OBJECTS_CODE_H_
|