// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/objects/hash-table.h"
#include "src/objects/string-table.h"
#include "src/visitors.h"
namespace v8 {
namespace internal {

using v8::MemoryPressureLevel;

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  /* The first 32 entries are most often used in the startup snapshot and */ \
  /* can use a shorter representation in the serialization format. */ \
  V(Map, free_space_map, FreeSpaceMap) \
  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
  V(Oddball, uninitialized_value, UninitializedValue) \
  V(Oddball, undefined_value, UndefinedValue) \
  V(Oddball, the_hole_value, TheHoleValue) \
  V(Oddball, null_value, NullValue) \
  V(Oddball, true_value, TrueValue) \
  V(Oddball, false_value, FalseValue) \
  V(String, empty_string, empty_string) \
  V(Map, meta_map, MetaMap) \
  V(Map, byte_array_map, ByteArrayMap) \
  V(Map, fixed_array_map, FixedArrayMap) \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
  V(Map, hash_table_map, HashTableMap) \
  V(Map, symbol_map, SymbolMap) \
  V(Map, one_byte_string_map, OneByteStringMap) \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
  V(Map, scope_info_map, ScopeInfoMap) \
  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
  V(Map, code_map, CodeMap) \
  V(Map, function_context_map, FunctionContextMap) \
  V(Map, cell_map, CellMap) \
  V(Map, weak_cell_map, WeakCellMap) \
  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
  V(Map, foreign_map, ForeignMap) \
  V(Map, heap_number_map, HeapNumberMap) \
  V(Map, transition_array_map, TransitionArrayMap) \
  V(Map, feedback_vector_map, FeedbackVectorMap) \
  V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
  /* Entries beyond the first 32 */ \
  /* The roots above this line should be boring from a GC point of view. */ \
  /* This means they are never in new space and never on a page that is */ \
  /* being compacted. */ \
  /* Oddballs */ \
  V(Oddball, arguments_marker, ArgumentsMarker) \
  V(Oddball, exception, Exception) \
  V(Oddball, termination_exception, TerminationException) \
  V(Oddball, optimized_out, OptimizedOut) \
  V(Oddball, stale_register, StaleRegister) \
  /* Context maps */ \
  V(Map, native_context_map, NativeContextMap) \
  V(Map, module_context_map, ModuleContextMap) \
  V(Map, eval_context_map, EvalContextMap) \
  V(Map, script_context_map, ScriptContextMap) \
  V(Map, block_context_map, BlockContextMap) \
  V(Map, catch_context_map, CatchContextMap) \
  V(Map, with_context_map, WithContextMap) \
  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
  V(Map, script_context_table_map, ScriptContextTableMap) \
  /* Maps */ \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
  V(Map, ordered_hash_table_map, OrderedHashTableMap) \
  V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap) \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
  V(Map, message_object_map, JSMessageObjectMap) \
  V(Map, external_map, ExternalMap) \
  V(Map, bytecode_array_map, BytecodeArrayMap) \
  V(Map, module_info_map, ModuleInfoMap) \
  V(Map, no_closures_cell_map, NoClosuresCellMap) \
  V(Map, one_closure_cell_map, OneClosureCellMap) \
  V(Map, many_closures_cell_map, ManyClosuresCellMap) \
  /* String maps */ \
  V(Map, native_source_string_map, NativeSourceStringMap) \
  V(Map, string_map, StringMap) \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
  V(Map, cons_string_map, ConsStringMap) \
  V(Map, thin_one_byte_string_map, ThinOneByteStringMap) \
  V(Map, thin_string_map, ThinStringMap) \
  V(Map, sliced_string_map, SlicedStringMap) \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
  V(Map, external_string_map, ExternalStringMap) \
  V(Map, external_string_with_one_byte_data_map, \
    ExternalStringWithOneByteDataMap) \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
  V(Map, short_external_string_map, ShortExternalStringMap) \
  V(Map, short_external_string_with_one_byte_data_map, \
    ShortExternalStringWithOneByteDataMap) \
  V(Map, internalized_string_map, InternalizedStringMap) \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
  V(Map, external_internalized_string_with_one_byte_data_map, \
    ExternalInternalizedStringWithOneByteDataMap) \
  V(Map, external_one_byte_internalized_string_map, \
    ExternalOneByteInternalizedStringMap) \
  V(Map, short_external_internalized_string_map, \
    ShortExternalInternalizedStringMap) \
  V(Map, short_external_internalized_string_with_one_byte_data_map, \
    ShortExternalInternalizedStringWithOneByteDataMap) \
  V(Map, short_external_one_byte_internalized_string_map, \
    ShortExternalOneByteInternalizedStringMap) \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
  /* Array element maps */ \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
  /* Oddball maps */ \
  V(Map, undefined_map, UndefinedMap) \
  V(Map, the_hole_map, TheHoleMap) \
  V(Map, null_map, NullMap) \
  V(Map, boolean_map, BooleanMap) \
  V(Map, uninitialized_map, UninitializedMap) \
  V(Map, arguments_marker_map, ArgumentsMarkerMap) \
  V(Map, exception_map, ExceptionMap) \
  V(Map, termination_exception_map, TerminationExceptionMap) \
  V(Map, optimized_out_map, OptimizedOutMap) \
  V(Map, stale_register_map, StaleRegisterMap) \
  /* Canonical empty values */ \
  V(ByteArray, empty_byte_array, EmptyByteArray) \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
    EmptyFixedUint8ClampedArray) \
  V(Script, empty_script, EmptyScript) \
  V(Cell, undefined_cell, UndefinedCell) \
  V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
  V(SeededNumberDictionary, empty_slow_element_dictionary, \
    EmptySlowElementDictionary) \
  V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
  V(WeakCell, empty_weak_cell, EmptyWeakCell) \
  V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
  /* Protectors */ \
  V(PropertyCell, array_protector, ArrayProtector) \
  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
  V(Cell, species_protector, SpeciesProtector) \
  V(PropertyCell, string_length_protector, StringLengthProtector) \
  V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
  V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
  V(PropertyCell, array_buffer_neutering_protector, \
    ArrayBufferNeuteringProtector) \
  /* Special numbers */ \
  V(HeapNumber, nan_value, NanValue) \
  V(HeapNumber, hole_nan_value, HoleNanValue) \
  V(HeapNumber, infinity_value, InfinityValue) \
  V(HeapNumber, minus_zero_value, MinusZeroValue) \
  V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
  /* Caches */ \
  V(FixedArray, number_string_cache, NumberStringCache) \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
  V(FixedArray, string_split_cache, StringSplitCache) \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
  V(Object, instanceof_cache_function, InstanceofCacheFunction) \
  V(Object, instanceof_cache_map, InstanceofCacheMap) \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
  /* Lists and dictionaries */ \
  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
  V(NameDictionary, public_symbol_table, PublicSymbolTable) \
  V(NameDictionary, api_symbol_table, ApiSymbolTable) \
  V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
  V(Object, script_list, ScriptList) \
  V(UnseededNumberDictionary, code_stubs, CodeStubs) \
  V(FixedArray, materialized_objects, MaterializedObjects) \
  V(FixedArray, microtask_queue, MicrotaskQueue) \
  V(FixedArray, detached_contexts, DetachedContexts) \
  V(ArrayList, retained_maps, RetainedMaps) \
  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
  /* weak_new_space_object_to_code_list is an array of weak cells, where */ \
  /* slots with even indices refer to the weak object, and the subsequent */ \
  /* slots refer to the code with the reference to the weak object. */ \
  V(ArrayList, weak_new_space_object_to_code_list, \
    WeakNewSpaceObjectToCodeList) \
  /* List to hold onto feedback vectors that we need for code coverage */ \
  V(Object, code_coverage_list, CodeCoverageList) \
  V(Object, weak_stack_trace_list, WeakStackTraceList) \
  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
  V(FixedArray, serialized_templates, SerializedTemplates) \
  V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
  V(TemplateList, message_listeners, MessageListeners) \
  /* per-Isolate map for JSPromiseCapability. */ \
  /* TODO(caitp): Make this a Struct */ \
  V(Map, js_promise_capability_map, JSPromiseCapabilityMap) \
  /* JS Entries */ \
  V(Code, js_entry_code, JsEntryCode) \
  V(Code, js_construct_entry_code, JsConstructEntryCode)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
  V(Smi, stack_limit, StackLimit) \
  V(Smi, real_stack_limit, RealStackLimit) \
  V(Smi, last_script_id, LastScriptId) \
  V(Smi, hash_seed, HashSeed) \
  /* To distinguish the function templates, so that we can find them in the */ \
  /* function cache of the native context. */ \
  V(Smi, next_template_serial_number, NextTemplateSerialNumber) \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
  V(Smi, construct_stub_create_deopt_pc_offset, \
    ConstructStubCreateDeoptPCOffset) \
  V(Smi, construct_stub_invoke_deopt_pc_offset, \
    ConstructStubInvokeDeoptPCOffset) \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
  V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)

#define ROOT_LIST(V) \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V) \
  V(StringTable, string_table, StringTable)
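
// A note on the X-macro pattern used above: clients of ROOT_LIST supply
// their own definition of V to stamp out one declaration per root. As a
// hedged illustration (not code that exists in this file), a client that
// wanted to count the roots could write:
//
//   #define COUNT_ROOT(type, name, camel_name) +1
//   static const int kNumberOfRoots = 0 ROOT_LIST(COUNT_ROOT);
//   #undef COUNT_ROOT
//
// which expands to 0 +1 +1 ..., one +1 per entry in the list.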

// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(ArgumentsMarker) \
  V(ArgumentsMarkerMap) \
  V(ArrayBufferNeuteringProtector) \
  V(ArrayIteratorProtector) \
  V(ArrayProtector) \
  V(BlockContextMap) \
  V(BooleanMap) \
  V(ByteArrayMap) \
  V(BytecodeArrayMap) \
  V(CatchContextMap) \
  V(CellMap) \
  V(CodeMap) \
  V(EmptyByteArray) \
  V(EmptyDescriptorArray) \
  V(EmptyFixedArray) \
  V(EmptyFixedFloat32Array) \
  V(EmptyFixedFloat64Array) \
  V(EmptyFixedInt16Array) \
  V(EmptyFixedInt32Array) \
  V(EmptyFixedInt8Array) \
  V(EmptyFixedUint16Array) \
  V(EmptyFixedUint32Array) \
  V(EmptyFixedUint8Array) \
  V(EmptyFixedUint8ClampedArray) \
  V(EmptyPropertyCell) \
  V(EmptyScopeInfo) \
  V(EmptyScript) \
  V(EmptySloppyArgumentsElements) \
  V(EmptySlowElementDictionary) \
  V(empty_string) \
  V(EmptyWeakCell) \
  V(EvalContextMap) \
  V(Exception) \
  V(FalseValue) \
  V(FastArrayIterationProtector) \
  V(FixedArrayMap) \
  V(FixedCOWArrayMap) \
  V(FixedDoubleArrayMap) \
  V(ForeignMap) \
  V(FreeSpaceMap) \
  V(FunctionContextMap) \
  V(GlobalPropertyCellMap) \
  V(HashTableMap) \
  V(HeapNumberMap) \
  V(HoleNanValue) \
  V(InfinityValue) \
  V(IsConcatSpreadableProtector) \
  V(JsConstructEntryCode) \
  V(JsEntryCode) \
  V(JSMessageObjectMap) \
  V(ManyClosuresCellMap) \
  V(MetaMap) \
  V(MinusInfinityValue) \
  V(MinusZeroValue) \
  V(ModuleContextMap) \
  V(ModuleInfoMap) \
  V(MutableHeapNumberMap) \
  V(NanValue) \
  V(NativeContextMap) \
  V(NoClosuresCellMap) \
  V(NullMap) \
  V(NullValue) \
  V(OneClosureCellMap) \
  V(OnePointerFillerMap) \
  V(OptimizedOut) \
  V(OrderedHashTableMap) \
  V(ScopeInfoMap) \
  V(ScriptContextMap) \
  V(SharedFunctionInfoMap) \
  V(SloppyArgumentsElementsMap) \
  V(SpeciesProtector) \
  V(StaleRegister) \
  V(StringLengthProtector) \
  V(SymbolMap) \
  V(TerminationException) \
  V(TheHoleMap) \
  V(TheHoleValue) \
  V(TransitionArrayMap) \
  V(TrueValue) \
  V(TwoPointerFillerMap) \
  V(UndefinedCell) \
  V(UndefinedMap) \
  V(UndefinedValue) \
  V(UninitializedMap) \
  V(UninitializedValue) \
  V(WeakCellMap) \
  V(WithContextMap) \
  PRIVATE_SYMBOL_LIST(V)

// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
class ConcurrentMarking;
class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
class RootVisitor;
class Scavenger;
class ScavengeJob;
class Space;
class StoreBuffer;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

enum class GarbageCollectionReason {
  kUnknown = 0,
  kAllocationFailure = 1,
  kAllocationLimit = 2,
  kContextDisposal = 3,
  kCountersExtension = 4,
  kDebugger = 5,
  kDeserializer = 6,
  kExternalMemoryPressure = 7,
  kFinalizeMarkingViaStackGuard = 8,
  kFinalizeMarkingViaTask = 9,
  kFullHashtable = 10,
  kHeapProfiler = 11,
  kIdleTask = 12,
  kLastResort = 13,
  kLowMemoryNotification = 14,
  kMakeHeapIterable = 15,
  kMemoryPressure = 16,
  kMemoryReducer = 17,
  kRuntime = 18,
  kSamplingProfiler = 19,
  kSnapshotCreator = 20,
  kTesting = 21
  // If you add new items here, then update the incremental_marking_reason,
  // mark_compact_reason, and scavenge_reason counters in counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

enum class YoungGenerationHandling {
  kRegularScavenge = 0,
  kFastPromotionDuringScavenge = 1,
  // Histogram::InspectConstructionArguments in chromium requires us to have at
  // least three buckets.
  kUnusedBucket = 2,
  // If you add new items here, then update the young_generation_handling in
  // counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

// A queue of objects promoted during scavenge. Each object is accompanied by
// its size to avoid dereferencing a map pointer for scanning. The last page in
// to-space is used for the promotion queue. On conflict during scavenge, the
// promotion queue is allocated externally and all entries are copied to the
// external queue.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(nullptr),
        rear_(nullptr),
        limit_(nullptr),
        emergency_stack_(nullptr),
        heap_(heap) {}

  void Initialize();
  void Destroy();

  inline void SetNewLimit(Address limit);
  inline bool IsBelowPromotionQueue(Address to_space_top);

  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
  inline void remove(HeapObject** target, int32_t* size,
                     bool* was_marked_black);

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == nullptr || emergency_stack_->length() == 0);
  }

 private:
  struct Entry {
    Entry(HeapObject* obj, int32_t size, bool was_marked_black)
        : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}

    HeapObject* obj_;
    int32_t size_ : 31;
    bool was_marked_black_ : 1;
  };

  inline Page* GetHeadPage();

  void RelocateQueueHead();

  // The front of the queue is higher in the memory page chain than the rear.
  struct Entry* front_;
  struct Entry* rear_;
  struct Entry* limit_;

  List<Entry>* emergency_stack_;
  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
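
// Sketch of how the scavenger drains the queue (hedged; the real call sites
// live in the heap implementation, not in this header). Promoted objects are
// pushed together with their size so the drain loop can re-scan them without
// reloading the map:
//
//   promotion_queue()->insert(target, size, was_marked_black);
//   while (!promotion_queue()->is_empty()) {
//     HeapObject* obj;
//     int32_t size;
//     bool was_marked_black;
//     promotion_queue()->remove(&obj, &size, &was_marked_black);
//     IterateAndScavengePromotedObject(obj, size, was_marked_black);
//   }
//
// Note that Entry packs the 31-bit size and the black-marking flag into a
// single 32-bit bitfield, keeping each entry at two words.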

class AllocationResult {
 public:
  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  // Implicit constructor from Object*.
  AllocationResult(Object* object)  // NOLINT
      : object_(object) {
    // AllocationResults can't return Smis, which are used to represent
    // failure and the space to retry in.
    CHECK(!object->IsSmi());
  }

  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}

  inline bool IsRetry() { return object_->IsSmi(); }
  inline HeapObject* ToObjectChecked();
  inline AllocationSpace RetrySpace();

  template <typename T>
  bool To(T** obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

 private:
  explicit AllocationResult(AllocationSpace space)
      : object_(Smi::FromInt(static_cast<int>(space))) {}

  Object* object_;
};

STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
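
// Typical consumption pattern (a hedged sketch; AllocateFoo stands in for
// any internal Heap allocator returning AllocationResult). A failed
// allocation is encoded as a Smi naming the space to collect, so callers
// branch on To():
//
//   AllocationResult allocation = AllocateFoo(size);
//   HeapObject* result = nullptr;
//   if (!allocation.To(&result)) {
//     AllocationSpace space = allocation.RetrySpace();
//     // Trigger a GC in `space`, then retry the allocation.
//   }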

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif

class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};

// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to the log file.
class HistogramInfo : public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};

class Heap {
 public:
  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
    PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
    PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };
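
  // For illustration (spelled out here rather than generated): the first
  // STRONG_ROOT_LIST entry, V(Map, free_space_map, FreeSpaceMap), expands
  // under ROOT_INDEX_DECLARATION to the enumerator kFreeSpaceMapRootIndex,
  // and under the ROOT_ACCESSOR macro further below to the getter
  // declaration:
  //
  //   inline Map* free_space_map();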

  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT, MINOR_MARK_COMPACT };

  enum UpdateAllocationSiteMode { kGlobal, kCached };

  // Taking this lock prevents the GC from entering a phase that relocates
  // object references.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };
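
  // RelocationLock is an RAII guard: constructing it blocks until the GC is
  // outside any relocating phase, and the destructor releases the mutex. A
  // hedged usage sketch (actual call sites live elsewhere in the codebase):
  //
  //   {
  //     Heap::RelocationLock lock(heap);
  //     // Object addresses are stable within this scope.
  //   }  // Mutex released here.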

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  typedef List<Chunk> Reservation;

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  static const int kPointerMultiplier = i::kPointerSize / 4;
#endif

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
  V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
  static const double kMaxHeapGrowingFactorMemoryConstrained;
  static const double kMaxHeapGrowingFactorIdle;
  static const double kConservativeHeapGrowingFactor;
  static const double kTargetMutatorUtilization;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;
  static const int kFinalizeIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInWords = 2;

  static const int kMinPromotedPercentForFastPromotionMode = 90;

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  static int GetMaximumFillToAlign(AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  static int GetFillToAlign(Address address, AllocationAlignment alignment);
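
  // Worked example (hedged, assuming a 32-bit tagged layout where
  // kPointerSize == 4 and doubles want 8-byte alignment): for an address
  // ending in ...4, GetFillToAlign(address, kDoubleAligned) would return 4,
  // so the allocator prepends a one-pointer filler before the object; for an
  // address ending in ...0 it returns 0. GetMaximumFillToAlign reports the
  // worst case (here 4) so callers can reserve enough space up front.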

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  static void FatalProcessOutOfMemory(const char* location,
                                      bool is_heap_oom = false);

  V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(int root_index);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);

  static bool IsUnmodifiedHeapObject(Object** p);

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
    return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
  }

  static inline GarbageCollector YoungGenerationCollector() {
    return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
  }

  static inline const char* CollectorName(GarbageCollector collector) {
    switch (collector) {
      case SCAVENGER:
        return "Scavenger";
      case MARK_COMPACTOR:
        return "Mark-Compact";
      case MINOR_MARK_COMPACTOR:
        return "Minor Mark-Compact";
    }
    return "Unknown collector";
  }

  V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
                                                    double mutator_speed);

  // Copy block of memory from src to dst. Size of block should be aligned
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Determines a static visitor id based on the given {map} that can then be
  // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
  static int GetStaticVisitorIdForMap(Map* map);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  inline Address* NewSpaceAllocationTopAddress();
  inline Address* NewSpaceAllocationLimitAddress();
  inline Address* OldSpaceAllocationTopAddress();
  inline Address* OldSpaceAllocationLimitAddress();

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // FreeSpace objects have a null map after deserialization. Update the map.
  void RepairFreeListsAfterDeserialization();

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If slots could have been recorded in
  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
  // pass ClearRecordedSlots::kNo.
  V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(Address addr, int size,
                                                     ClearRecordedSlots mode);

  bool CanMoveObjectStart(HeapObject* object);

  static bool IsImmovable(HeapObject* object);

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(HeapObject* object, int by);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Converts the given boolean condition to JavaScript boolean value.
  inline Oddball* ToBoolean(bool condition);

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }
  void IterateEncounteredWeakCollections(RootVisitor* visitor);

  void set_encountered_weak_cells(Object* weak_cell) {
    encountered_weak_cells_ = weak_cell;
  }
  Object* encountered_weak_cells() const { return encountered_weak_cells_; }

  void set_encountered_transition_arrays(Object* transition_array) {
    encountered_transition_arrays_ = transition_array;
  }
  Object* encountered_transition_arrays() const {
    return encountered_transition_arrays_;
  }

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  inline HeapState gc_state() { return gc_state_; }
  void SetGCState(HeapState state);

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return NULL.
  template <FindMementoMode mode>
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations, List<Address>* maps);

  //
  // Support for the API.
  //

  bool CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  void MemoryPressureNotification(MemoryPressureLevel level,
                                  bool is_isolate_locked);
  void CheckMemoryPressure();

  void SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
                              void* data);

  double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check new space expansion criteria and expand semispaces if they were hit.
  void CheckNewSpaceExpansionCriteria();

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearNormalizedMapCaches();

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  inline uint32_t HashSeed();

  inline int NextScriptId();

  inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  inline void SetConstructStubCreateDeoptPCOffset(int pc_offset);
  inline void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
  inline void SetGetterStubDeoptPCOffset(int pc_offset);
  inline void SetSetterStubDeoptPCOffset(int pc_offset);
  inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
  inline int GetNextTemplateSerialNumber();

  inline void SetSerializedTemplates(FixedArray* templates);
  inline void SetSerializedGlobalProxySizes(FixedArray* sizes);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }

  int64_t external_memory() { return external_memory_; }
  void update_external_memory(int64_t delta) { external_memory_ += delta; }

  void update_external_memory_concurrently_freed(intptr_t freed) {
    external_memory_concurrently_freed_.Increment(freed);
  }

  void account_external_memory_concurrently_freed() {
    external_memory_ -= external_memory_concurrently_freed_.Value();
    external_memory_concurrently_freed_.SetValue(0);
  }
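
  // The external memory counters above back the embedder-facing accounting
  // (e.g. v8::Isolate::AdjustAmountOfExternalAllocatedMemory). A hedged
  // sketch of the concurrent-free protocol implied by the two helpers:
  // concurrent sweeper threads only bump the atomic counter, and the main
  // thread folds it into external_memory_ at a safe point.
  //
  //   // On a concurrent sweeper thread:
  //   heap->update_external_memory_concurrently_freed(bytes_freed);
  //   // On the main thread, at a safepoint:
  //   heap->account_external_memory_concurrently_freed();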

  void DeoptMarkedAllocationSites();

  inline bool DeoptMaybeTenuredAllocationSites();

  void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
                                             Handle<WeakCell> code);

  void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);

  void CompactWeakFixedArrays();

  void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(size_t used, size_t committed);

  void ActivateMemoryReducerIfNeeded();

  bool ShouldOptimizeForMemoryUsage();

  bool IsLowMemoryDevice() {
    return max_old_generation_size_ <= kMaxOldSpaceSizeLowMemoryDevice;
  }

  bool IsMemoryConstrainedDevice() {
    return max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice;
  }

  bool HighMemoryPressure() {
    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
  }

  size_t HeapLimitForDebugging() {
    const size_t kDebugHeapSizeFactor = 4;
    size_t max_limit = std::numeric_limits<size_t>::max() / 4;
    return Min(max_limit,
               initial_max_old_generation_size_ * kDebugHeapSizeFactor);
  }
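
  // Worked example (hedged): with an initial max old generation size of
  // 256 MB, HeapLimitForDebugging() yields 1 GB, i.e. the original limit
  // times kDebugHeapSizeFactor == 4; the Min() against SIZE_MAX / 4 merely
  // caps the result for very large configured limits.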

  void IncreaseHeapLimitForDebugging() {
    max_old_generation_size_ =
        Max(max_old_generation_size_, HeapLimitForDebugging());
  }

  void RestoreOriginalHeapLimit() {
    // Do not set the limit lower than the live size + some slack.
    size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
    max_old_generation_size_ =
        Min(max_old_generation_size_,
            Max(initial_max_old_generation_size_, min_limit));
  }

  bool IsHeapLimitIncreasedForDebugging() {
    return max_old_generation_size_ == HeapLimitForDebugging();
  }

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
                     size_t max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
  V8_INLINE void CreateObjectStats();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  inline Address NewSpaceTop();

  NewSpace* new_space() { return new_space_; }
  OldSpace* old_space() { return old_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }

  inline PagedSpace* paged_space(int idx);
  inline Space* space(int idx);

  // Returns name of the space.
  const char* GetSpaceName(int idx);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_; }

  MemoryAllocator* memory_allocator() { return memory_allocator_; }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_;
  }

  MinorMarkCompactCollector* minor_mark_compact_collector() {
    return minor_mark_compact_collector_;
  }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

  // Heap root getters.
#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  // Utility type maps.
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) inline String* name();
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) inline Symbol* name();
  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
  WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  Object* root(RootListIndex index) { return roots_[index]; }
  Handle<Object> root_handle(RootListIndex index) {
    return Handle<Object>(&roots_[index]);
  }
  template <typename T>
  bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
    Object** const handle_location = bit_cast<Object**>(handle.address());
    if (handle_location >= &roots_[kRootListLength]) return false;
    if (handle_location < &roots_[0]) return false;
    *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
    return true;
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void SetRootCodeStubs(UnseededNumberDictionary* value);

  void SetRootMaterializedObjects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  void SetRootScriptList(Object* value) {
    roots_[kScriptListRootIndex] = value;
  }

  void SetRootStringTable(StringTable* value) {
    roots_[kStringTableRootIndex] = value;
  }

  void SetRootNoScriptSharedFunctionInfos(Object* value) {
    roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
  }

  void SetMessageListeners(TemplateList* value) {
    roots_[kMessageListenersRootIndex] = value;
  }

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // The stack limit is thread-dependent. To be able to reproduce the same
  // snapshot blob, we need to reset it before serializing.
  void ClearStackLimits();

  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RegisterStrongRoots(Object** start, Object** end);
  void UnregisterStrongRoots(Object** start);

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure();

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // Iterates over all roots in the heap.
  void IterateRoots(RootVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(RootVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(RootVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(RootVisitor* v, VisitMode mode);

  // Iterate pointers of promoted objects.
  void IterateAndScavengePromotedObject(HeapObject* target, int size,
                                        bool was_marked_black);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Write barrier support for object[offset] = o;
  inline void RecordWrite(Object* object, int offset, Object* o);
  inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
  void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
  void RecordWritesIntoCode(Code* code);
  inline void RecordFixedArrayElements(FixedArray* array, int offset,
                                       int length);
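
  // A hedged sketch of the write-barrier contract: every store of a heap
  // pointer into another object should be paired with a RecordWrite so that
  // old-to-new pointers land in the store buffer for the next scavenge.
  //
  //   host->set_field(value);                        // the raw store
  //   heap->RecordWrite(host, kFieldOffset, value);  // the barrier
  //
  // (set_field and kFieldOffset are placeholders; real call sites use the
  // object's field offset constants, and most go through field-writing
  // macros rather than calling RecordWrite directly.)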

  inline Address* store_buffer_top_address();

  void ClearRecordedSlot(HeapObject* object, Object** slot);
  void ClearRecordedSlotRange(Address start, Address end);

  bool HasRecordedSlot(HeapObject* object, Object** slot);

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  // Start incremental marking and ensure that idle time handler can perform
  // incremental steps.
  void StartIdleIncrementalMarking(GarbageCollectionReason gc_reason);

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  void StartIncrementalMarking(
      int gc_flags, GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void StartIncrementalMarkingIfAllocationLimitIsReached(
      int gc_flags,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);

  bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms,
                                         GarbageCollectionReason gc_reason);

  void RegisterReservationsForBlackAllocation(Reservation* reservations);

  IncrementalMarking* incremental_marking() { return incremental_marking_; }

  // ===========================================================================
  // Concurrent marking API. ===================================================
  // ===========================================================================

  ConcurrentMarking* concurrent_marking() { return concurrent_marking_; }

  // The runtime uses this function to notify potentially unsafe object layout
  // changes that require special synchronization with the concurrent marker.
  // A layout change is unsafe if
  // - it removes a tagged in-object field.
  // - it replaces a tagged in-object field with an untagged in-object field.
  void NotifyObjectLayoutChange(HeapObject* object,
                                const DisallowHeapAllocation&);
#ifdef VERIFY_HEAP
  // This function checks that either
  // - the map transition is safe,
  // - or it was communicated to GC using NotifyObjectLayoutChange.
  void VerifyObjectLayoutChange(HeapObject* object, Map* new_map);
#endif

  // ===========================================================================
  // Embedder heap tracer support. =============================================
  // ===========================================================================

  LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
    return local_embedder_heap_tracer_;
  }
  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
  void TracePossibleWrapper(JSObject* js_object);
  void RegisterExternallyReferencedObject(Object** object);

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String* string);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ===========
  // ===========================================================================

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old space.
  inline bool InOldSpace(Object* object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Slow methods that can be used for verification as they can also be used
  // with off-heap Addresses.
  bool ContainsSlow(Address addr);
  bool InSpaceSlow(Address addr, AllocationSpace space);
  inline bool InNewSpaceSlow(Address address);
  inline bool InOldSpaceSlow(Address address);

  // ===========================================================================
  // Object statistics tracking. ===============================================
  // ===========================================================================

  // Returns the number of buckets used by object statistics tracking during a
  // major GC. Note that the following methods fail gracefully when the bounds
  // are exceeded.
  size_t NumberOfTrackedHeapObjectTypes();

  // Returns object statistics about count and size at the last major GC.
  // Objects are being grouped into buckets that roughly resemble existing
  // instance types.
  size_t ObjectCountAtLastGC(size_t index);
  size_t ObjectSizeAtLastGC(size_t index);

  // Retrieves names of buckets used by object statistics tracking.
  bool GetObjectTypeName(size_t index, const char** object_type,
                         const char** object_sub_type);
1343 : size_t NumberOfTrackedHeapObjectTypes();
1344 :
1345 : // Returns object statistics about count and size at the last major GC.
1346 : // Objects are being grouped into buckets that roughly resemble existing
1347 : // instance types.
1348 : size_t ObjectCountAtLastGC(size_t index);
1349 : size_t ObjectSizeAtLastGC(size_t index);
1350 :
1351 : // Retrieves names of buckets used by object statistics tracking.
1352 : bool GetObjectTypeName(size_t index, const char** object_type,
1353 : const char** object_sub_type);
1354 :
1355 : // ===========================================================================
1356 : // Code statistics. ==========================================================
1357 : // ===========================================================================
1358 :
1359 : // Collect code (Code and BytecodeArray objects) statistics.
1360 : void CollectCodeStatistics();
1361 :
1362 : // ===========================================================================
1363 : // GC statistics. ============================================================
1364 : // ===========================================================================
1365 :
1366 : // Returns the maximum amount of memory reserved for the heap.
1367 : size_t MaxReserved() {
1368 60789 : return 2 * max_semi_space_size_ + max_old_generation_size_;
1369 : }
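 :
 : // Worked example: with 16 MB semispaces and a 256 MB old generation cap,
 : // MaxReserved() is 2 * 16 MB + 256 MB = 288 MB; the factor of two covers
 : // the from- and to-space halves of new space.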
1370 : size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
1371 : size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
1372 : size_t MaxOldGenerationSize() { return max_old_generation_size_; }
1373 : size_t MaxExecutableSize() { return max_executable_size_; }
1374 :
1375 : // Returns the capacity of the heap in bytes w/o growing. Heap grows when
1376 : // more spaces are needed until it reaches the limit.
1377 : size_t Capacity();
1378 :
1379 : // Returns the capacity of the old generation.
1380 : size_t OldGenerationCapacity();
1381 :
1382 : // Returns the amount of memory currently committed for the heap.
1383 : size_t CommittedMemory();
1384 :
1385 : // Returns the amount of memory currently committed for the old space.
1386 : size_t CommittedOldGenerationMemory();
1387 :
1388 : // Returns the amount of executable memory currently committed for the heap.
1389 : size_t CommittedMemoryExecutable();
1390 :
1391 : // Returns the amount of physical memory currently committed for the heap.
1392 : size_t CommittedPhysicalMemory();
1393 :
1394 : // Returns the maximum amount of memory ever committed for the heap.
1395 : size_t MaximumCommittedMemory() { return maximum_committed_; }
1396 :
1397 : // Updates the maximum committed memory for the heap. Should be called
1398 : // whenever a space grows.
1399 : void UpdateMaximumCommitted();
1400 :
1401 : // Returns the available bytes in space w/o growing.
1402 : // Heap doesn't guarantee that it can allocate an object that requires
1403 : // all available bytes. Check MaxHeapObjectSize() instead.
1404 : size_t Available();
1405 :
1406 : // Returns the size of all objects residing in the heap.
1407 : size_t SizeOfObjects();
1408 :
1409 : void UpdateSurvivalStatistics(int start_new_space_size);
1410 :
1411 : inline void IncrementPromotedObjectsSize(size_t object_size) {
1412 45910787 : promoted_objects_size_ += object_size;
1413 : }
1414 : inline size_t promoted_objects_size() { return promoted_objects_size_; }
1415 :
1416 : inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
1417 70563195 : semi_space_copied_object_size_ += object_size;
1418 : }
1419 : inline size_t semi_space_copied_object_size() {
1420 : return semi_space_copied_object_size_;
1421 : }
1422 :
1423 : inline size_t SurvivedNewSpaceObjectSize() {
1424 122507 : return promoted_objects_size_ + semi_space_copied_object_size_;
1425 : }
1426 :
1427 140920 : inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
1428 :
1429 187650 : inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
1430 :
1431 192529 : inline void IncrementNodesPromoted() { nodes_promoted_++; }
1432 :
1433 : inline void IncrementYoungSurvivorsCounter(size_t survived) {
1434 128449 : survived_last_scavenge_ = survived;
1435 128449 : survived_since_last_expansion_ += survived;
1436 : }
1437 :
1438 : inline uint64_t PromotedTotalSize() {
1439 1260066 : return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
1440 : }
1441 :
1442 : inline void UpdateNewSpaceAllocationCounter();
1443 :
1444 : inline size_t NewSpaceAllocationCounter();
1445 :
1446 : // This should be used only for testing.
1447 : void set_new_space_allocation_counter(size_t new_value) {
1448 : new_space_allocation_counter_ = new_value;
1449 : }
1450 :
1451 : void UpdateOldGenerationAllocationCounter() {
1452 : old_generation_allocation_counter_at_last_gc_ =
1453 53346 : OldGenerationAllocationCounter();
1454 : }
1455 :
1456 : size_t OldGenerationAllocationCounter() {
1457 215910 : return old_generation_allocation_counter_at_last_gc_ +
1458 215910 : PromotedSinceLastGC();
1459 : }
1460 :
1461 : // This should be used only for testing.
1462 : void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
1463 : old_generation_allocation_counter_at_last_gc_ = new_value;
1464 : }
1465 :
1466 : size_t PromotedSinceLastGC() {
1467 215910 : return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
1468 : }
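 :
 : // Worked example: if the old generation held 80 MB of objects at the last
 : // GC and PromotedSpaceSizeOfObjects() now reports 95 MB, then
 : // PromotedSinceLastGC() is 15 MB and OldGenerationAllocationCounter()
 : // equals the counter at the last GC plus those 15 MB.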
1469 :
1470 15827509 : int gc_count() const { return gc_count_; }
1471 :
1472 : // Returns the size of objects residing in non new spaces.
1473 : size_t PromotedSpaceSizeOfObjects();
1474 :
1475 : // ===========================================================================
1476 : // Prologue/epilogue callback methods.========================================
1477 : // ===========================================================================
1478 :
1479 : void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
1480 : GCType gc_type_filter, bool pass_isolate = true);
1481 : void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);
1482 :
1483 : void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
1484 : GCType gc_type_filter, bool pass_isolate = true);
1485 : void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
1486 :
1487 : void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
1488 : void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
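 :
 : // Usage sketch (illustrative; the signature is v8::Isolate::GCCallback):
 : //
 : //   static void OnGC(v8::Isolate* isolate, v8::GCType type,
 : //                    v8::GCCallbackFlags flags) { /* observe the GC */ }
 : //
 : //   heap->AddGCPrologueCallback(OnGC, kGCTypeAll);
 : //   // ...
 : //   heap->RemoveGCPrologueCallback(OnGC);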
1489 :
1490 : // ===========================================================================
1491 : // Allocation methods. =======================================================
1492 : // ===========================================================================
1493 :
1494 : // Creates a filler object and returns a heap object immediately after it.
1495 : MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
1496 : int filler_size);
1497 :
1498 : // Creates a filler object if needed for alignment and returns a heap object
1499 : // immediately after it. If any space is left after the returned object,
1500 : // another filler object is created so the over-allocated memory is iterable.
1501 : MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
1502 : int object_size,
1503 : int allocation_size,
1504 : AllocationAlignment alignment);
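 :
 : // Resulting layout sketch for AlignWithFiller: within the allocation_size
 : // bytes starting at {object}, an optional pre-filler aligns the returned
 : // object, and any leftover tail is also filled so the memory stays
 : // iterable:
 : //
 : //   [ filler | object (object_size bytes) | filler ]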
1505 :
1506 : // ===========================================================================
1507 : // ArrayBuffer tracking. =====================================================
1508 : // ===========================================================================
1509 :
1510 : // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
1511 : // in the registration/unregistration APIs. Consider dropping the "New" from
1512 : // "RegisterNewArrayBuffer" because one can re-register a previously
1513 : // unregistered buffer, too, and the name is confusing.
1514 : void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
1515 : void UnregisterArrayBuffer(JSArrayBuffer* buffer);
1516 :
1517 : // ===========================================================================
1518 : // Allocation site tracking. =================================================
1519 : // ===========================================================================
1520 :
1521 : // Updates the AllocationSite of a given {object}. If the global pretenuring
1522 : // storage is passed as {pretenuring_feedback} the memento found count on
1523 : // the corresponding allocation site is immediately updated and an entry
1524 : // in the hash map is created. Otherwise the entry (including the count
1525 : // value) is cached on the local pretenuring feedback.
1526 : template <UpdateAllocationSiteMode mode>
1527 : inline void UpdateAllocationSite(HeapObject* object,
1528 : base::HashMap* pretenuring_feedback);
1529 :
1530 : // Removes an entry from the global pretenuring storage.
1531 : inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
1532 :
1533 : // Merges local pretenuring feedback into the global one. Note that this
1534 : // method needs to be called after evacuation, as allocation sites may be
1535 : // evacuated and this method resolves forward pointers accordingly.
1536 : void MergeAllocationSitePretenuringFeedback(
1537 : const base::HashMap& local_pretenuring_feedback);
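 :
 : // Flow sketch (illustrative; the mode placeholder selects between the
 : // global and the locally cached variant):
 : //
 : //   base::HashMap local_feedback(kInitialFeedbackCapacity);
 : //   heap->UpdateAllocationSite<mode>(object, &local_feedback);
 : //   // ... after evacuation, forward pointers get resolved:
 : //   heap->MergeAllocationSitePretenuringFeedback(local_feedback);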
1538 :
1539 : // =============================================================================
1540 :
1541 : #ifdef VERIFY_HEAP
1542 : // Verify the heap is in its normal state before or after a GC.
1543 : void Verify();
1544 : #endif
1545 :
1546 : #ifdef DEBUG
1547 : void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
1548 :
1549 : void Print();
1550 : void PrintHandles();
1551 :
1552 : // Report heap statistics.
1553 : void ReportHeapStatistics(const char* title);
1554 : void ReportCodeStatistics(const char* title);
1555 : #endif
1556 :
1557 : static const char* GarbageCollectionReasonToString(
1558 : GarbageCollectionReason gc_reason);
1559 :
1560 : private:
1561 : class SkipStoreBufferScope;
1562 : class PretenuringScope;
1563 :
1564 : // External strings table is a place where all external strings are
1565 : // registered. We need to keep track of such strings to properly
1566 : // finalize them.
1567 : class ExternalStringTable {
1568 : public:
1569 : // Registers an external string.
1570 : inline void AddString(String* string);
1571 :
1572 : inline void IterateAll(RootVisitor* v);
1573 : inline void IterateNewSpaceStrings(RootVisitor* v);
1574 : inline void PromoteAllNewSpaceStrings();
1575 :
1576 : // Restores internal invariant and gets rid of collected strings. Must be
1577 : // called after each Iterate*() that modified the strings.
1578 : void CleanUpAll();
1579 : void CleanUpNewSpaceStrings();
1580 :
1581 : // Destroys all allocated memory.
1582 : void TearDown();
1583 :
1584 : private:
1585 60782 : explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
1586 :
1587 : inline void Verify();
1588 :
1589 : inline void AddOldString(String* string);
1590 :
1591 : // Notifies the table that only a prefix of the new list is valid.
1592 : inline void ShrinkNewStrings(int position);
1593 :
1594 : // To speed up scavenge collections new space strings are kept
1595 : // separate from old space strings.
1596 : List<Object*> new_space_strings_;
1597 : List<Object*> old_space_strings_;
1598 :
1599 : Heap* heap_;
1600 :
1601 : friend class Heap;
1602 :
1603 : DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
1604 : };
1605 :
1606 : struct StrongRootsList;
1607 :
1608 : struct StringTypeTable {
1609 : InstanceType type;
1610 : int size;
1611 : RootListIndex index;
1612 : };
1613 :
1614 : struct ConstantStringTable {
1615 : const char* contents;
1616 : RootListIndex index;
1617 : };
1618 :
1619 : struct StructTable {
1620 : InstanceType type;
1621 : int size;
1622 : RootListIndex index;
1623 : };
1624 :
1625 : struct GCCallbackPair {
1626 : GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
1627 : bool pass_isolate)
1628 66 : : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {}
1629 :
1630 : bool operator==(const GCCallbackPair& other) const {
1631 : return other.callback == callback;
1632 : }
1633 :
1634 : v8::Isolate::GCCallback callback;
1635 : GCType gc_type;
1636 : bool pass_isolate;
1637 : };
1638 :
1639 : typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
1640 : Object** pointer);
1641 :
1642 : static const int kInitialStringTableSize = 2048;
1643 : static const int kInitialEvalCacheSize = 64;
1644 : static const int kInitialNumberStringCacheSize = 256;
1645 :
1646 : static const int kRememberedUnmappedPages = 128;
1647 :
1648 : static const StringTypeTable string_type_table[];
1649 : static const ConstantStringTable constant_string_table[];
1650 : static const StructTable struct_table[];
1651 :
1652 : static const int kYoungSurvivalRateHighThreshold = 90;
1653 : static const int kYoungSurvivalRateAllowedDeviation = 15;
1654 : static const int kOldSurvivalRateLowThreshold = 10;
1655 :
1656 : static const int kMaxMarkCompactsInIdleRound = 7;
1657 : static const int kIdleScavengeThreshold = 5;
1658 :
1659 : static const int kInitialFeedbackCapacity = 256;
1660 :
1661 : Heap();
1662 :
1663 : static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
1664 : Heap* heap, Object** pointer);
1665 :
1666 : // Selects the proper allocation space based on the pretenuring decision.
1667 : static AllocationSpace SelectSpace(PretenureFlag pretenure) {
1668 380974903 : return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
1669 : }
1670 :
1671 : #define ROOT_ACCESSOR(type, name, camel_name) \
1672 : inline void set_##name(type* value);
1673 : ROOT_LIST(ROOT_ACCESSOR)
1674 : #undef ROOT_ACCESSOR
1675 :
1676 : StoreBuffer* store_buffer() { return store_buffer_; }
1677 :
1678 : void set_current_gc_flags(int flags) {
1679 87811 : current_gc_flags_ = flags;
1680 : DCHECK(!ShouldFinalizeIncrementalMarking() ||
1681 : !ShouldAbortIncrementalMarking());
1682 : }
1683 :
1684 : inline bool ShouldReduceMemory() const {
1685 562194 : return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
1686 : }
1687 :
1688 : inline bool ShouldAbortIncrementalMarking() const {
1689 71325 : return (current_gc_flags_ & kAbortIncrementalMarkingMask) != 0;
1690 : }
1691 :
1692 : inline bool ShouldFinalizeIncrementalMarking() const {
1693 : return (current_gc_flags_ & kFinalizeIncrementalMarkingMask) != 0;
1694 : }
1695 :
1696 : void PreprocessStackTraces();
1697 :
1698 : // Checks whether a global GC is necessary
1699 : GarbageCollector SelectGarbageCollector(AllocationSpace space,
1700 : const char** reason);
1701 :
1702 : // Make sure there is a filler value behind the top of the new space
1703 : // so that the GC does not confuse some uninitialized/stale memory
1704 : // with the allocation memento of the object at the top.
1705 : void EnsureFillerObjectAtTop();
1706 :
1707 : // Ensure that we have swept all spaces in such a way that we can iterate
1708 : // over all objects. May cause a GC.
1709 : void MakeHeapIterable();
1710 :
1711 : // Performs a garbage collection operation.
1712 : // Returns whether there is a chance that another major GC could
1713 : // collect more garbage.
1714 : bool CollectGarbage(
1715 : GarbageCollector collector, GarbageCollectionReason gc_reason,
1716 : const char* collector_reason,
1717 : const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1718 :
1719 : // Performs a garbage collection.
1720 : // Returns whether there is a chance another major GC could
1721 : // collect more garbage.
1722 : bool PerformGarbageCollection(
1723 : GarbageCollector collector,
1724 : const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1725 :
1726 : inline void UpdateOldSpaceLimits();
1727 :
1728 : // Initializes a JSObject based on its map.
1729 : void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
1730 : Map* map);
1731 :
1732 : // Initializes JSObject body starting at given offset.
1733 : void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
1734 :
1735 : void InitializeAllocationMemento(AllocationMemento* memento,
1736 : AllocationSite* allocation_site);
1737 :
1738 : bool CreateInitialMaps();
1739 : void CreateInitialObjects();
1740 :
1741 : // These Create*EntryStub functions are here and forced not to be inlined
1742 : // because of a gcc-4.4 bug that assigns wrong vtable entries.
1743 : NO_INLINE(void CreateJSEntryStub());
1744 : NO_INLINE(void CreateJSConstructEntryStub());
1745 :
1746 : void CreateFixedStubs();
1747 :
1748 : HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
1749 :
1750 : // Commits from space if it is uncommitted.
1751 : void EnsureFromSpaceIsCommitted();
1752 :
1753 : // Uncommit unused semi space.
1754 : bool UncommitFromSpace();
1755 :
1756 : // Fill in bogus values in from space
1757 : void ZapFromSpace();
1758 :
1759 : // Deopts all code that contains allocation instructions which are tenured or
1760 : // not tenured. Moreover, it clears the pretenuring allocation site statistics.
1761 : void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
1762 :
1763 : // Evaluates local pretenuring for the old space and calls
1764 : // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
1765 : // the old space.
1766 : void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
1767 :
1768 : // Record statistics before and after garbage collection.
1769 : void ReportStatisticsBeforeGC();
1770 : void ReportStatisticsAfterGC();
1771 :
1772 : // Creates and installs the full-sized number string cache.
1773 : int FullSizeNumberStringCacheLength();
1774 : // Flush the number to string cache.
1775 : void FlushNumberStringCache();
1776 :
1777 : void ConfigureInitialOldGenerationSize();
1778 :
1779 : bool HasLowYoungGenerationAllocationRate();
1780 : bool HasLowOldGenerationAllocationRate();
1781 : double YoungGenerationMutatorUtilization();
1782 : double OldGenerationMutatorUtilization();
1783 :
1784 : void ReduceNewSpaceSize();
1785 :
1786 : GCIdleTimeHeapState ComputeHeapState();
1787 :
1788 : bool PerformIdleTimeAction(GCIdleTimeAction action,
1789 : GCIdleTimeHeapState heap_state,
1790 : double deadline_in_ms);
1791 :
1792 : void IdleNotificationEpilogue(GCIdleTimeAction action,
1793 : GCIdleTimeHeapState heap_state, double start_ms,
1794 : double deadline_in_ms);
1795 :
1796 : inline void UpdateAllocationsHash(HeapObject* object);
1797 : inline void UpdateAllocationsHash(uint32_t value);
1798 : void PrintAlloctionsHash();
1799 :
1800 : void AddToRingBuffer(const char* string);
1801 : void GetFromRingBuffer(char* buffer);
1802 :
1803 : void CompactRetainedMaps(ArrayList* retained_maps);
1804 :
1805 : void CollectGarbageOnMemoryPressure();
1806 :
1807 : void InvokeOutOfMemoryCallback();
1808 :
1809 : void ComputeFastPromotionMode(double survival_rate);
1810 :
1811 : // Attempt to over-approximate the weak closure by marking object groups and
1812 : // implicit references from global handles, but don't atomically complete
1813 : // marking. If we continue to mark incrementally, we might have marked
1814 : // objects that die later.
1815 : void FinalizeIncrementalMarking(GarbageCollectionReason gc_reason);
1816 :
1817 : // Returns the timer used for a given GC type.
1818 : // - GCScavenger: young generation GC
1819 : // - GCCompactor: full GC
1820 : // - GCFinalizeMC: finalization of incremental full GC
1821 : // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
1822 : // memory reduction
1823 : HistogramTimer* GCTypeTimer(GarbageCollector collector);
1824 :
1825 : // ===========================================================================
1826 : // Pretenuring. ==============================================================
1827 : // ===========================================================================
1828 :
1829 : // Pretenuring decisions are made based on feedback collected during new space
1830 : // evacuation. Note that between feedback collection and calling this method
1831 : // objects in old space must not move.
1832 : void ProcessPretenuringFeedback();
1833 :
1834 : // ===========================================================================
1835 : // Actual GC. ================================================================
1836 : // ===========================================================================
1837 :
1838 : // Code that should be run before and after each GC. Includes some
1839 : // reporting/verification activities when compiled with DEBUG set.
1840 : void GarbageCollectionPrologue();
1841 : void GarbageCollectionEpilogue();
1842 :
1843 : // Performs a major collection in the whole heap.
1844 : void MarkCompact();
1845 : // Performs a minor collection of just the young generation.
1846 : void MinorMarkCompact();
1847 :
1848 : // Code to be run before and after mark-compact.
1849 : void MarkCompactPrologue();
1850 : void MarkCompactEpilogue();
1851 :
1852 : // Performs a minor collection in the new generation.
1853 : void Scavenge();
1854 : void EvacuateYoungGeneration();
1855 :
1856 : Address DoScavenge(Address new_space_front);
1857 :
1858 : void UpdateNewSpaceReferencesInExternalStringTable(
1859 : ExternalStringTableUpdaterCallback updater_func);
1860 :
1861 : void UpdateReferencesInExternalStringTable(
1862 : ExternalStringTableUpdaterCallback updater_func);
1863 :
1864 : void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
1865 : void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
1866 : void ProcessNativeContexts(WeakObjectRetainer* retainer);
1867 : void ProcessAllocationSites(WeakObjectRetainer* retainer);
1868 : void ProcessWeakListRoots(WeakObjectRetainer* retainer);
1869 :
1870 : // ===========================================================================
1871 : // GC statistics. ============================================================
1872 : // ===========================================================================
1873 :
1874 315614 : inline size_t OldGenerationSpaceAvailable() {
1875 631228 : if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
1876 310439 : return old_generation_allocation_limit_ -
1877 310439 : static_cast<size_t>(PromotedTotalSize());
1878 : }
1879 :
1880 : // We allow incremental marking to overshoot the allocation limit for
1881 : // performace reasons. If the overshoot is too large then we are more
1882 : // eager to finalize incremental marking.
1883 2004 : inline bool AllocationLimitOvershotByLargeMargin() {
1884 : // This guards against too eager finalization in small heaps.
1885 : // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
1886 : size_t kMarginForSmallHeaps = 32u * MB;
1887 4008 : if (old_generation_allocation_limit_ >= PromotedTotalSize()) return false;
1888 1976 : uint64_t overshoot = PromotedTotalSize() - old_generation_allocation_limit_;
1889 : // Overshoot margin is 50% of allocation limit or half-way to the max heap
1890 : // with special handling of small heaps.
1891 : uint64_t margin =
1892 : Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
1893 1976 : (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
1894 1976 : return overshoot >= margin;
1895 : }
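 :
 : // Worked example: with a 100 MB allocation limit and a 500 MB max old
 : // generation, margin = Min(Max(100 MB / 2, 32 MB), (500 MB - 100 MB) / 2)
 : // = 50 MB, so marking is eagerly finalized once PromotedTotalSize()
 : // exceeds 150 MB.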
1896 :
1897 : void UpdateTotalGCTime(double duration);
1898 :
1899 122535 : bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
1900 :
1901 : // ===========================================================================
1902 : // Growing strategy. =========================================================
1903 : // ===========================================================================
1904 :
1905 : // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
1906 : // This constant limits the effect of load RAIL mode on GC.
1907 : // The value is arbitrary and chosen as the largest load time observed in
1908 : // v8 browsing benchmarks.
1909 : static const int kMaxLoadTimeMs = 7000;
1910 :
1911 : bool ShouldOptimizeForLoadTime();
1912 :
1913 : // Decrease the allocation limit if the new limit based on the given
1914 : // parameters is lower than the current limit.
1915 : void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
1916 : double mutator_speed);
1917 :
1918 : // Calculates the allocation limit based on a given growing factor and a
1919 : // given old generation size.
1920 : size_t CalculateOldGenerationAllocationLimit(double factor,
1921 : size_t old_gen_size);
1922 :
1923 : // Sets the allocation limit to trigger the next full garbage collection.
1924 : void SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
1925 : double mutator_speed);
1926 :
1927 : size_t MinimumAllocationLimitGrowingStep();
1928 :
1929 : size_t old_generation_allocation_limit() const {
1930 : return old_generation_allocation_limit_;
1931 : }
1932 :
1933 : bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
1934 :
1935 527542 : bool CanExpandOldGeneration(size_t size) {
1936 531363 : if (force_oom_) return false;
1937 1055084 : return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
1938 : }
1939 :
1940 33368 : bool IsCloseToOutOfMemory(size_t slack) {
1941 33368 : return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
1942 : }
1943 :
1944 : bool ShouldExpandOldGenerationOnSlowAllocation();
1945 :
1946 : enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
1947 : IncrementalMarkingLimit IncrementalMarkingLimitReached();
1948 :
1949 : // ===========================================================================
1950 : // Idle notification. ========================================================
1951 : // ===========================================================================
1952 :
1953 : bool RecentIdleNotificationHappened();
1954 : void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
1955 :
1956 : // ===========================================================================
1957 : // HeapIterator helpers. =====================================================
1958 : // ===========================================================================
1959 :
1960 24569 : void heap_iterator_start() { heap_iterator_depth_++; }
1961 :
1962 24569 : void heap_iterator_end() { heap_iterator_depth_--; }
1963 :
1964 : bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
1965 :
1966 : // ===========================================================================
1967 : // Allocation methods. =======================================================
1968 : // ===========================================================================
1969 :
1970 : // Returns a deep copy of the JavaScript object.
1971 : // Properties and elements are copied too.
1972 : // Optionally takes an AllocationSite to be appended in an AllocationMemento.
1973 : MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
1974 : AllocationSite* site = NULL);
1975 :
1976 : // Allocates a JS Map in the heap.
1977 : MUST_USE_RESULT AllocationResult
1978 : AllocateMap(InstanceType instance_type, int instance_size,
1979 : ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
1980 :
1981 : // Allocates and initializes a new JavaScript object based on a
1982 : // constructor.
1983 : // If allocation_site is non-null, then a memento is emitted after the object
1984 : // that points to the site.
1985 : MUST_USE_RESULT AllocationResult AllocateJSObject(
1986 : JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
1987 : AllocationSite* allocation_site = NULL);
1988 :
1989 : // Allocates and initializes a new JavaScript object based on a map.
1990 : // Passing an allocation site means that a memento will be created that
1991 : // points to the site.
1992 : MUST_USE_RESULT AllocationResult
1993 : AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
1994 : AllocationSite* allocation_site = NULL);
1995 :
1996 : // Allocates a HeapNumber from value.
1997 : MUST_USE_RESULT AllocationResult AllocateHeapNumber(
1998 : MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
1999 :
2000 : // Allocates a byte array of the specified length
2001 : MUST_USE_RESULT AllocationResult
2002 : AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
2003 :
2004 : // Allocates a bytecode array with given contents.
2005 : MUST_USE_RESULT AllocationResult
2006 : AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
2007 : int parameter_count, FixedArray* constant_pool);
2008 :
2009 : MUST_USE_RESULT AllocationResult CopyCode(Code* code);
2010 :
2011 : MUST_USE_RESULT AllocationResult
2012 : CopyBytecodeArray(BytecodeArray* bytecode_array);
2013 :
2014 : // Allocates a fixed array initialized with undefined values
2015 : MUST_USE_RESULT AllocationResult
2016 : AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
2017 :
2018 : // Allocate an uninitialized object. The memory is non-executable if the
2019 : // hardware and OS allow. This is the single choke-point for allocations
2020 : // performed by the runtime and should not be bypassed (to extend this to
2021 : // inlined allocations, use the Heap::DisableInlineAllocation() support).
2022 : MUST_USE_RESULT inline AllocationResult AllocateRaw(
2023 : int size_in_bytes, AllocationSpace space,
2024 : AllocationAlignment alignment = kWordAligned);
2025 :
2026 : // Allocates a heap object based on the map.
2027 : MUST_USE_RESULT AllocationResult
2028 : Allocate(Map* map, AllocationSpace space,
2029 : AllocationSite* allocation_site = NULL);
2030 :
2031 : // Allocates a partial map for bootstrapping.
2032 : MUST_USE_RESULT AllocationResult
2033 : AllocatePartialMap(InstanceType instance_type, int instance_size);
2034 :
2035 : // Allocate a block of memory in the given space (filled with a filler).
2036 : // Used as a fall-back for generated code when the space is full.
2037 : MUST_USE_RESULT AllocationResult
2038 : AllocateFillerObject(int size, bool double_align, AllocationSpace space);
2039 :
2040 : // Allocate an uninitialized fixed array.
2041 : MUST_USE_RESULT AllocationResult
2042 : AllocateRawFixedArray(int length, PretenureFlag pretenure);
2043 :
2044 : // Allocate an uninitialized fixed double array.
2045 : MUST_USE_RESULT AllocationResult
2046 : AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
2047 :
2048 : // Allocate an initialized fixed array with the given filler value.
2049 : MUST_USE_RESULT AllocationResult
2050 : AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
2051 : Object* filler);
2052 :
2053 : // Allocates and partially initializes a String. There are two String
2054 : // encodings: one-byte and two-byte. These functions allocate a string of
2055 : // the given length and set its map and length fields. The characters of
2056 : // the string are uninitialized.
2057 : MUST_USE_RESULT AllocationResult
2058 : AllocateRawOneByteString(int length, PretenureFlag pretenure);
2059 : MUST_USE_RESULT AllocationResult
2060 : AllocateRawTwoByteString(int length, PretenureFlag pretenure);
2061 :
2062 : // Allocates an internalized string in old space based on the character
2063 : // stream.
2064 : MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
2065 : Vector<const char> str, int chars, uint32_t hash_field);
2066 :
2067 : MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
2068 : Vector<const uint8_t> str, uint32_t hash_field);
2069 :
2070 : MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
2071 : Vector<const uc16> str, uint32_t hash_field);
2072 :
2073 : template <bool is_one_byte, typename T>
2074 : MUST_USE_RESULT AllocationResult
2075 1202 : AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
2076 :
2077 : template <typename T>
2078 : MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
2079 : T t, int chars, uint32_t hash_field);
2080 :
2081 : // Allocates an uninitialized fixed array. It must be filled by the caller.
2082 : MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
2083 :
2084 : // Make a copy of src and return it.
2085 : MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
2086 :
2087 : // Make a copy of src, also grow the copy, and return the copy.
2088 : MUST_USE_RESULT AllocationResult
2089 : CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);
2090 :
2091 : // Make a copy of the first new_len elements of src and return the copy.
2092 : MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
2093 : int new_len,
2094 : PretenureFlag pretenure);
2095 :
2096 : // Make a copy of src, set the map, and return the copy.
2097 : MUST_USE_RESULT AllocationResult
2098 : CopyFixedArrayWithMap(FixedArray* src, Map* map);
2099 :
2100 : // Make a copy of src and return it.
2101 : MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
2102 : FixedDoubleArray* src);
2103 :
2104 : // Computes a single character string where the character has code.
2105 : // A cache is used for one-byte (Latin1) codes.
2106 : MUST_USE_RESULT AllocationResult
2107 : LookupSingleCharacterStringFromCode(uint16_t code);
2108 :
2109 : // Allocate a symbol in old space.
2110 : MUST_USE_RESULT AllocationResult AllocateSymbol();
2111 :
2112 : // Allocates an external array of the specified length and type.
2113 : MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
2114 : int length, ExternalArrayType array_type, void* external_pointer,
2115 : PretenureFlag pretenure);
2116 :
2117 : // Allocates a fixed typed array of the specified length and type.
2118 : MUST_USE_RESULT AllocationResult
2119 : AllocateFixedTypedArray(int length, ExternalArrayType array_type,
2120 : bool initialize, PretenureFlag pretenure);
2121 :
2122 : // Make a copy of src and return it.
2123 : MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
2124 :
2125 : // Make a copy of src, set the map, and return the copy.
2126 : MUST_USE_RESULT AllocationResult
2127 : CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
2128 :
2129 : // Allocates a fixed double array with uninitialized values.
2130 : MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
2131 : int length, PretenureFlag pretenure = NOT_TENURED);
2132 :
2133 : // Allocate empty fixed array.
2134 : MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
2135 :
2136 : // Allocate empty scope info.
2137 : MUST_USE_RESULT AllocationResult AllocateEmptyScopeInfo();
2138 :
2139 : // Allocate empty fixed typed array of given type.
2140 : MUST_USE_RESULT AllocationResult
2141 : AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
2142 :
2143 : // Allocate a tenured simple cell.
2144 : MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
2145 :
2146 : // Allocate a tenured JS global property cell initialized with the hole.
2147 : MUST_USE_RESULT AllocationResult AllocatePropertyCell();
2148 :
2149 : MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
2150 :
2151 : MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
2152 :
2153 : // Allocates a new utility object in the old generation.
2154 : MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
2155 :
2156 : // Allocates a new foreign object.
2157 : MUST_USE_RESULT AllocationResult
2158 : AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
2159 :
2160 : MUST_USE_RESULT AllocationResult
2161 : AllocateCode(int object_size, bool immovable);
2162 :
2163 : // ===========================================================================
2164 :
2165 : void set_force_oom(bool value) { force_oom_ = value; }
2166 :
2167 : // The amount of external memory registered through the API.
2168 : int64_t external_memory_;
2169 :
2170 : // The limit when to trigger memory pressure from the API.
2171 : int64_t external_memory_limit_;
2172 :
2173 : // Caches the amount of external memory registered at the last MC.
2174 : int64_t external_memory_at_last_mark_compact_;
2175 :
2176 : // The amount of memory that has been freed concurrently.
2177 : base::AtomicNumber<intptr_t> external_memory_concurrently_freed_;
2178 :
2179 : // This can be calculated directly from a pointer to the heap; however, it is
2180 : // more expedient to get at the isolate directly from within Heap methods.
2181 : Isolate* isolate_;
2182 :
2183 : Object* roots_[kRootListLength];
2184 :
2185 : size_t code_range_size_;
2186 : size_t max_semi_space_size_;
2187 : size_t initial_semispace_size_;
2188 : size_t max_old_generation_size_;
2189 : size_t initial_max_old_generation_size_;
2190 : size_t initial_old_generation_size_;
2191 : bool old_generation_size_configured_;
2192 : size_t max_executable_size_;
2193 : size_t maximum_committed_;
2194 :
2195 : // For keeping track of how much data has survived
2196 : // scavenge since last new space expansion.
2197 : size_t survived_since_last_expansion_;
2198 :
2199 : // ... and since the last scavenge.
2200 : size_t survived_last_scavenge_;
2201 :
2202 : // This is not the depth of nested AlwaysAllocateScope's but rather a single
2203 : // count, as scopes can be acquired from multiple tasks (read: threads).
2204 : base::AtomicNumber<size_t> always_allocate_scope_count_;
2205 :
2206 : // Stores the memory pressure level that is set by MemoryPressureNotification
2207 : // and reset by a mark-compact garbage collection.
2208 : base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
2209 :
2210 : v8::debug::OutOfMemoryCallback out_of_memory_callback_;
2211 : void* out_of_memory_callback_data_;
2212 :
2213 : // For keeping track of context disposals.
2214 : int contexts_disposed_;
2215 :
2216 : // The length of the retained_maps array at the time of context disposal.
2217 : // This separates maps in the retained_maps array that were created before
2218 : // and after context disposal.
2219 : int number_of_disposed_maps_;
2220 :
2221 : int global_ic_age_;
2222 :
2223 : NewSpace* new_space_;
2224 : OldSpace* old_space_;
2225 : OldSpace* code_space_;
2226 : MapSpace* map_space_;
2227 : LargeObjectSpace* lo_space_;
2228 : // Map from the space id to the space.
2229 : Space* space_[LAST_SPACE + 1];
2230 : HeapState gc_state_;
2231 : int gc_post_processing_depth_;
2232 : Address new_space_top_after_last_gc_;
2233 :
2234 : // Returns the amount of external memory registered since last global gc.
2235 : uint64_t PromotedExternalMemorySize();
2236 :
2237 : // How many "runtime allocations" happened.
2238 : uint32_t allocations_count_;
2239 :
2240 : // Running hash over allocations performed.
2241 : uint32_t raw_allocations_hash_;
2242 :
2243 : // How many mark-sweep collections happened.
2244 : unsigned int ms_count_;
2245 :
2246 : // How many gc happened.
2247 : unsigned int gc_count_;
2248 :
2249 : // For post mortem debugging.
2250 : int remembered_unmapped_pages_index_;
2251 : Address remembered_unmapped_pages_[kRememberedUnmappedPages];
2252 :
2253 : #ifdef DEBUG
2254 : // If the --gc-interval flag is set to a positive value, this
2255 : // variable holds the number of allocations that remain until the next
2256 : // failure and garbage collection.
2257 : int allocation_timeout_;
2258 : #endif // DEBUG
2259 :
2260 : // Limit that triggers a global GC on the next (normally caused) GC. This
2261 : // is checked when we have already decided to do a GC to help determine
2262 : // which collector to invoke, before expanding a paged space in the old
2263 : // generation and on every allocation in large object space.
2264 : size_t old_generation_allocation_limit_;
2265 :
2266 : // Indicates that inline bump-pointer allocation has been globally disabled
2267 : // for all spaces. This is used to disable allocations in generated code.
2268 : bool inline_allocation_disabled_;
2269 :
2270 : // Weak list heads, threaded through the objects.
2271 : // List heads are initialized lazily and contain the undefined_value at start.
2272 : Object* native_contexts_list_;
2273 : Object* allocation_sites_list_;
2274 :
2275 : // List of encountered weak collections (JSWeakMap and JSWeakSet) during
2276 : // marking. It is initialized during marking, destroyed after marking and
2277 : // contains Smi(0) while marking is not active.
2278 : Object* encountered_weak_collections_;
2279 :
2280 : Object* encountered_weak_cells_;
2281 :
2282 : Object* encountered_transition_arrays_;
2283 :
2284 : List<GCCallbackPair> gc_epilogue_callbacks_;
2285 : List<GCCallbackPair> gc_prologue_callbacks_;
2286 :
2287 : int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
2288 :
2289 : GCTracer* tracer_;
2290 :
2291 : size_t promoted_objects_size_;
2292 : double promotion_ratio_;
2293 : double promotion_rate_;
2294 : size_t semi_space_copied_object_size_;
2295 : size_t previous_semi_space_copied_object_size_;
2296 : double semi_space_copied_rate_;
2297 : int nodes_died_in_new_space_;
2298 : int nodes_copied_in_new_space_;
2299 : int nodes_promoted_;
2300 :
2301 : // This is the pretenuring trigger for allocation sites that are in maybe
2302 : // tenure state. When we have switched to the maximum new space size we
2303 : // deoptimize
2303 : // the code that belongs to the allocation site and derive the lifetime
2304 : // of the allocation site.
2305 : unsigned int maximum_size_scavenges_;
2306 :
2307 : // Total time spent in GC.
2308 : double total_gc_time_ms_;
2309 :
2310 : // Last time an idle notification happened.
2311 : double last_idle_notification_time_;
2312 :
2313 : // Last time a garbage collection happened.
2314 : double last_gc_time_;
2315 :
2316 : Scavenger* scavenge_collector_;
2317 :
2318 : MarkCompactCollector* mark_compact_collector_;
2319 : MinorMarkCompactCollector* minor_mark_compact_collector_;
2320 :
2321 : MemoryAllocator* memory_allocator_;
2322 :
2323 : StoreBuffer* store_buffer_;
2324 :
2325 : IncrementalMarking* incremental_marking_;
2326 : ConcurrentMarking* concurrent_marking_;
2327 :
2328 : GCIdleTimeHandler* gc_idle_time_handler_;
2329 :
2330 : MemoryReducer* memory_reducer_;
2331 :
2332 : ObjectStats* live_object_stats_;
2333 : ObjectStats* dead_object_stats_;
2334 :
2335 : ScavengeJob* scavenge_job_;
2336 :
2337 : AllocationObserver* idle_scavenge_observer_;
2338 :
2339 : // This counter is increased before each GC and never reset.
2340 : // To account for the bytes allocated since the last GC, use the
2341 : // NewSpaceAllocationCounter() function.
2342 : size_t new_space_allocation_counter_;
2343 :
2344 : // This counter is increased before each GC and never reset. To
2345 : // account for the bytes allocated since the last GC, use the
2346 : // OldGenerationAllocationCounter() function.
2347 : size_t old_generation_allocation_counter_at_last_gc_;
2348 :
2349 : // The size of objects in old generation after the last MarkCompact GC.
2350 : size_t old_generation_size_at_last_gc_;
2351 :
2352 : // If the --deopt_every_n_garbage_collections flag is set to a positive value,
2353 : // this variable holds the number of garbage collections since the last
2354 : // deoptimization triggered by garbage collection.
2355 : int gcs_since_last_deopt_;
2356 :
2357 : // The feedback storage is used to store allocation sites (keys) and how often
2358 : // they have been visited (values) by finding a memento behind an object. The
2359 : // storage is only alive temporary during a GC. The invariant is that all
2360 : // pointers in this map are already fixed, i.e., they do not point to
2361 : // forwarding pointers.
2362 : base::HashMap* global_pretenuring_feedback_;
2363 :
2364 : char trace_ring_buffer_[kTraceRingBufferSize];
2365 : // If it's not full then the data is from 0 to ring_buffer_end_. If it's
2366 : // full then the data is from ring_buffer_end_ to the end of the buffer and
2367 : // from 0 to ring_buffer_end_.
2368 : bool ring_buffer_full_;
2369 : size_t ring_buffer_end_;
2370 :
2371 : // Shared state read by the scavenge collector and set by ScavengeObject.
2372 : PromotionQueue promotion_queue_;
2373 :
2374 : // Flag is set when the heap has been configured. The heap can be repeatedly
2375 : // configured through the API until it is set up.
2376 : bool configured_;
2377 :
2378 : // Currently set GC flags that are respected by all GC components.
2379 : int current_gc_flags_;
2380 :
2381 : // Currently set GC callback flags that are used to pass information between
2382 : // the embedder and V8's GC.
2383 : GCCallbackFlags current_gc_callback_flags_;
2384 :
2385 : ExternalStringTable external_string_table_;
2386 :
2387 : base::Mutex relocation_mutex_;
2388 :
2389 : int gc_callbacks_depth_;
2390 :
2391 : bool deserialization_complete_;
2392 :
2393 : StrongRootsList* strong_roots_list_;
2394 :
2395 : // The depth of HeapIterator nestings.
2396 : int heap_iterator_depth_;
2397 :
2398 : LocalEmbedderHeapTracer* local_embedder_heap_tracer_;
2399 :
2400 : bool fast_promotion_mode_;
2401 :
2402 : // Used for testing purposes.
2403 : bool force_oom_;
2404 : bool delay_sweeper_tasks_for_testing_;
2405 :
2406 : HeapObject* pending_layout_change_object_;
2407 :
2408 : // Classes in "heap" can be friends.
2409 : friend class AlwaysAllocateScope;
2410 : friend class ConcurrentMarking;
2411 : friend class GCCallbacksScope;
2412 : friend class GCTracer;
2413 : friend class HeapIterator;
2414 : friend class IdleScavengeObserver;
2415 : friend class IncrementalMarking;
2416 : friend class IncrementalMarkingJob;
2417 : friend class LargeObjectSpace;
2418 : friend class MarkCompactCollector;
2419 : friend class MinorMarkCompactCollector;
2420 : friend class MarkCompactMarkingVisitor;
2421 : friend class NewSpace;
2422 : friend class ObjectStatsCollector;
2423 : friend class Page;
2424 : friend class PagedSpace;
2425 : friend class Scavenger;
2426 : friend class StoreBuffer;
2427 : friend class TestMemoryAllocatorScope;
2428 :
2429 : // The allocator interface.
2430 : friend class Factory;
2431 :
2432 : // The Isolate constructs us.
2433 : friend class Isolate;
2434 :
2435 : // Used in cctest.
2436 : friend class HeapTester;
2437 :
2438 : DISALLOW_COPY_AND_ASSIGN(Heap);
2439 : };
2440 :
2441 :
2442 : class HeapStats {
2443 : public:
2444 : static const int kStartMarker = 0xDECADE00;
2445 : static const int kEndMarker = 0xDECADE01;
2446 :
2447 : intptr_t* start_marker; // 0
2448 : size_t* new_space_size; // 1
2449 : size_t* new_space_capacity; // 2
2450 : size_t* old_space_size; // 3
2451 : size_t* old_space_capacity; // 4
2452 : size_t* code_space_size; // 5
2453 : size_t* code_space_capacity; // 6
2454 : size_t* map_space_size; // 7
2455 : size_t* map_space_capacity; // 8
2456 : size_t* lo_space_size; // 9
2457 : size_t* global_handle_count; // 10
2458 : size_t* weak_global_handle_count; // 11
2459 : size_t* pending_global_handle_count; // 12
2460 : size_t* near_death_global_handle_count; // 13
2461 : size_t* free_global_handle_count; // 14
2462 : size_t* memory_allocator_size; // 15
2463 : size_t* memory_allocator_capacity; // 16
2464 : size_t* malloced_memory; // 17
2465 : size_t* malloced_peak_memory; // 18
2466 : size_t* objects_per_type; // 19
2467 : size_t* size_per_type; // 20
2468 : int* os_error; // 21
2469 : char* last_few_messages; // 22
2470 : char* js_stacktrace; // 23
2471 : intptr_t* end_marker; // 24
2472 : };
2473 :
2474 :
2475 : class AlwaysAllocateScope {
2476 : public:
2477 : explicit inline AlwaysAllocateScope(Isolate* isolate);
2478 : inline ~AlwaysAllocateScope();
2479 :
2480 : private:
2481 : Heap* heap_;
2482 : };
2483 :
2484 :
2485 : // Visitor class to verify interior pointers in spaces that do not contain
2486 : // or care about intergenerational references. All heap object pointers have to
2487 : // point into the heap to a location that has a map pointer at its first word.
2488 : // Caveat: Heap::Contains is an approximation because it can return true for
2489 : // objects in a heap space but above the allocation pointer.
2490 : class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
2491 : public:
2492 : inline void VisitPointers(HeapObject* host, Object** start,
2493 : Object** end) override;
2494 : inline void VisitRootPointers(Root root, Object** start,
2495 : Object** end) override;
2496 :
2497 : private:
2498 : inline void VerifyPointers(Object** start, Object** end);
2499 : };
2500 :
2501 :
2502 : // Verify that all objects are Smis.
2503 : class VerifySmisVisitor : public RootVisitor {
2504 : public:
2505 : inline void VisitRootPointers(Root root, Object** start,
2506 : Object** end) override;
2507 : };
2508 :
2509 :
2510 : // Space iterator for iterating over all spaces of the heap. Returns each space
2511 : // in turn, and null when it is done.
2512 : class AllSpaces BASE_EMBEDDED {
2513 : public:
2514 979397 : explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
2515 : Space* next();
2516 :
2517 : private:
2518 : Heap* heap_;
2519 : int counter_;
2520 : };
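 :
 : // All of the space iterators here share the same protocol (sketch):
 : //
 : //   AllSpaces spaces(heap);
 : //   while (Space* space = spaces.next()) {
 : //     // visit space; next() returns null once all spaces were seen.
 : //   }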
2521 :
2522 :
2523 : // Space iterator for iterating over all old spaces of the heap: Old space
2524 : // and code space. Returns each space in turn, and null when it is done.
2525 : class V8_EXPORT_PRIVATE OldSpaces BASE_EMBEDDED {
2526 : public:
2527 245014 : explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
2528 : OldSpace* next();
2529 :
2530 : private:
2531 : Heap* heap_;
2532 : int counter_;
2533 : };
2534 :
2535 :
2536 : // Space iterator for iterating over all the paged spaces of the heap: Map
2537 : // space, old space and code space. Returns
2538 : // each space in turn, and null when it is done.
2539 : class PagedSpaces BASE_EMBEDDED {
2540 : public:
2541 174941 : explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
2542 : PagedSpace* next();
2543 :
2544 : private:
2545 : Heap* heap_;
2546 : int counter_;
2547 : };
2548 :
2549 :
2550 : class SpaceIterator : public Malloced {
2551 : public:
2552 : explicit SpaceIterator(Heap* heap);
2553 : virtual ~SpaceIterator();
2554 :
2555 : bool has_next();
2556 : Space* next();
2557 :
2558 : private:
2559 : Heap* heap_;
2560 : int current_space_; // from enum AllocationSpace.
2561 : };
2562 :
2563 :
2564 : // A HeapIterator provides iteration over the whole heap. It
2565 : // aggregates the specific iterators for the different spaces as
2566 : // these can each iterate over only one space.
2567 : //
2568 : // HeapIterator ensures there is no allocation during its lifetime
2569 : // (using an embedded DisallowHeapAllocation instance).
2570 : //
2571 : // HeapIterator can skip free list nodes (that is, de-allocated heap
2572 : // objects that still remain in the heap). As implementation of free
2573 : // nodes filtering uses GC marks, it can't be used during MS/MC GC
2574 : // phases. Also, it is forbidden to interrupt iteration in this mode,
2575 : // as this will leave heap objects marked (and thus, unusable).
2576 : class HeapIterator BASE_EMBEDDED {
2577 : public:
2578 : enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
2579 :
2580 : explicit HeapIterator(Heap* heap,
2581 : HeapObjectsFiltering filtering = kNoFiltering);
2582 : ~HeapIterator();
2583 :
2584 : HeapObject* next();
2585 :
2586 : private:
2587 : HeapObject* NextObject();
2588 :
2589 : DisallowHeapAllocation no_heap_allocation_;
2590 :
2591 : Heap* heap_;
2592 : HeapObjectsFiltering filtering_;
2593 : HeapObjectsFilter* filter_;
2594 : // Space iterator for iterating all the spaces.
2595 : SpaceIterator* space_iterator_;
2596 : // Object iterator for the space currently being iterated.
2597 : std::unique_ptr<ObjectIterator> object_iterator_;
2598 : };
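 :
 : // Usage sketch (the null-terminated protocol mirrors the space iterators):
 : //
 : //   HeapIterator iterator(heap);
 : //   for (HeapObject* obj = iterator.next(); obj != NULL;
 : //        obj = iterator.next()) {
 : //     // process obj; allocation is forbidden while iterating.
 : //   }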
2599 :
2600 : // Abstract base class for checking whether a weak object should be retained.
2601 69189 : class WeakObjectRetainer {
2602 : public:
2603 175881 : virtual ~WeakObjectRetainer() {}
2604 :
2605 : // Return whether this object should be retained. If NULL is returned the
2606 : // object has no references. Otherwise the address of the retained object
2607 : // should be returned as in some GC situations the object has been moved.
2608 : virtual Object* RetainAs(Object* object) = 0;
2609 : };
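 :
 : // Sketch of a retainer that keeps every object alive while honoring the
 : // forwarding contract above (illustrative subclass):
 : //
 : //   class KeepAllRetainer : public WeakObjectRetainer {
 : //    public:
 : //     Object* RetainAs(Object* object) override { return object; }
 : //   };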
2610 :
2611 : // -----------------------------------------------------------------------------
2612 : // Allows observation of allocations.
2613 : class AllocationObserver {
2614 : public:
2615 : explicit AllocationObserver(intptr_t step_size)
2616 182418 : : step_size_(step_size), bytes_to_next_step_(step_size) {
2617 : DCHECK(step_size >= kPointerSize);
2618 : }
2619 177927 : virtual ~AllocationObserver() {}
2620 :
2621 : // Called each time the observed space does an allocation step. This may
2622 : // happen more frequently than the step_size we are monitoring (e.g. when
2623 : // there are multiple observers, or when a page or space boundary is encountered).
2624 8911196 : void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
2625 8911196 : bytes_to_next_step_ -= bytes_allocated;
2626 8911196 : if (bytes_to_next_step_ <= 0) {
2627 : Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
2628 174948 : size);
2629 174948 : step_size_ = GetNextStepSize();
2630 174948 : bytes_to_next_step_ = step_size_;
2631 : }
2632 8911196 : }
2633 :
2634 : protected:
2635 : intptr_t step_size() const { return step_size_; }
2636 : intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
2637 :
2638 : // Pure virtual method provided by the subclasses that gets called when at
2639 : // least step_size bytes have been allocated. soon_object is the address just
2640 : // allocated (but not yet initialized.) size is the size of the object as
2641 : // requested (i.e. w/o the alignment fillers). Some complexities to be aware
2642 : // of:
2643 : // 1) soon_object will be nullptr in cases where we end up observing an
2644 : // allocation that happens to be a filler space (e.g. page boundaries.)
2645 : // 2) size is the requested size at the time of allocation. Right-trimming
2646 : // may change the object size dynamically.
2647 : // 3) soon_object may actually be the first object in an allocation-folding
2648 : // group. In such a case size is the size of the group rather than the
2649 : // first object.
2650 : virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
2651 :
2652 : // Subclasses can override this method to make step size dynamic.
2653 125903 : virtual intptr_t GetNextStepSize() { return step_size_; }
2654 :
2655 : intptr_t step_size_;
2656 : intptr_t bytes_to_next_step_;
2657 :
2658 : private:
2659 : friend class LargeObjectSpace;
2660 : friend class NewSpace;
2661 : friend class PagedSpace;
2662 : DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
2663 : };
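 :
 : // Sketch of a concrete observer (illustrative; an instance would be
 : // registered with a space so that AllocationStep() gets driven):
 : //
 : //   class LoggingObserver : public AllocationObserver {
 : //    public:
 : //     explicit LoggingObserver(intptr_t step) : AllocationObserver(step) {}
 : //
 : //    protected:
 : //     void Step(int bytes_allocated, Address soon_object,
 : //               size_t size) override {
 : //       // Runs once at least step_size bytes have been allocated.
 : //     }
 : //   };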
2664 :
2665 : V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
2666 :
2667 : } // namespace internal
2668 : } // namespace v8
2669 :
2670 : #endif // V8_HEAP_HEAP_H_
|