// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/accessors.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/objects.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/smi.h"
#include "src/objects/string-table.h"
#include "src/roots.h"
#include "src/visitors.h"

namespace v8 {

namespace debug {
typedef void (*OutOfMemoryCallback)(void* data);
}  // namespace debug

namespace internal {

namespace heap {
class HeapTester;
class TestMemoryAllocatorScope;
}  // namespace heap

class ObjectBoilerplateDescription;
class BytecodeArray;
class CodeDataContainer;
class DeoptimizationData;
class HandlerTable;
class IncrementalMarking;
class JSArrayBuffer;
class ExternalString;
using v8::MemoryPressureLevel;

class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferTracker;
class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapController;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class JSFinalizationGroup;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
class RootVisitor;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class Space;
class StoreBuffer;
class StressScavengeObserver;
class TimedHistogram;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };

enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };

enum class FixedArrayVisitationMode { kRegular, kIncremental };

enum class TraceRetainingPathMode { kEnabled, kDisabled };

enum class RetainingPathOption { kDefault, kTrackEphemeronPath };

enum class GarbageCollectionReason {
  kUnknown = 0,
  kAllocationFailure = 1,
  kAllocationLimit = 2,
  kContextDisposal = 3,
  kCountersExtension = 4,
  kDebugger = 5,
  kDeserializer = 6,
  kExternalMemoryPressure = 7,
  kFinalizeMarkingViaStackGuard = 8,
  kFinalizeMarkingViaTask = 9,
  kFullHashtable = 10,
  kHeapProfiler = 11,
  kIdleTask = 12,
  kLastResort = 13,
  kLowMemoryNotification = 14,
  kMakeHeapIterable = 15,
  kMemoryPressure = 16,
  kMemoryReducer = 17,
  kRuntime = 18,
  kSamplingProfiler = 19,
  kSnapshotCreator = 20,
  kTesting = 21,
  kExternalFinalize = 22
  // If you add new items here, then update the incremental_marking_reason,
  // mark_compact_reason, and scavenge_reason counters in counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

enum class YoungGenerationHandling {
  kRegularScavenge = 0,
  kFastPromotionDuringScavenge = 1,
  // Histogram::InspectConstructionArguments in chromium requires us to have at
  // least three buckets.
  kUnusedBucket = 2,
  // If you add new items here, then update the young_generation_handling in
  // counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

class AllocationResult {
 public:
  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  // Implicit constructor from Object.
  AllocationResult(Object object)  // NOLINT
      : object_(object) {
    // AllocationResults can't return Smis, which are used to represent
    // failure and the space to retry in.
    CHECK(!object->IsSmi());
  }

  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}

  inline bool IsRetry() { return object_->IsSmi(); }
  inline HeapObject ToObjectChecked();
  inline AllocationSpace RetrySpace();

  template <typename T>
  bool To(T* obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

 private:
  explicit AllocationResult(AllocationSpace space)
      : object_(Smi::FromInt(static_cast<int>(space))) {}

  Object object_;
};

STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
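
// A hedged usage sketch of the retry protocol above. AllocateRaw stands in
// for any allocator that returns an AllocationResult; it is not declared in
// this header.
//
//   AllocationResult result = AllocateRaw(size_in_bytes, NEW_SPACE);
//   HeapObject object;
//   if (!result.To(&object)) {
//     // Allocation failed; result.RetrySpace() names the space in which to
//     // trigger a GC before retrying.
//   }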

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = nullptr;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif

class Heap {
 public:
  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState {
    NOT_IN_GC,
    SCAVENGE,
    MARK_COMPACT,
    MINOR_MARK_COMPACT,
    TEAR_DOWN
  };

  using PretenuringFeedbackMap =
      std::unordered_map<AllocationSite, size_t, Object::Hasher>;
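
  // Hedged sketch of the intended flow: a scavenger fills a local feedback
  // map and merges it into the global one after evacuation
  // (UpdateAllocationSite and MergeAllocationSitePretenuringFeedback are
  // declared below in this class; the capacity is illustrative):
  //
  //   Heap::PretenuringFeedbackMap local_feedback(256);
  //   ... heap->UpdateAllocationSite(map, object, &local_feedback) ...
  //   heap->MergeAllocationSitePretenuringFeedback(local_feedback);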

  // Taking this mutex prevents the GC from entering a phase that relocates
  // object references.
  base::Mutex* relocation_mutex() { return &relocation_mutex_; }

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  typedef std::vector<Chunk> Reservation;

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  // TODO(ishell): kSystemPointerMultiplier?
  static const int kPointerMultiplier = i::kSystemPointerSize / 4;
#endif

  // Semi-space size needs to be a multiple of page size.
  static const size_t kMinSemiSpaceSizeInKB =
      1 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
  static const size_t kMaxSemiSpaceSizeInKB =
      16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
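
  // Worked example (illustrative; the concrete values depend on the target):
  // with 64-bit pointers kPointerMultiplier == 2, and assuming 256 KB pages
  // ((1 << kPageSizeBits) / KB == 256), the bounds come out as
  // kMinSemiSpaceSizeInKB == 512 and kMaxSemiSpaceSizeInKB == 8192, i.e.
  // semi-spaces between 512 KB and 8 MB.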

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
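
  // These flags form the gc_flags bitmask taken by the GC entry points
  // declared below, e.g. (a usage sketch composed from this header):
  //
  //   heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
  //                           GarbageCollectionReason::kLowMemoryNotification);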

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInTaggedWords = 2;

  static const int kMinPromotedPercentForFastPromotionMode = 90;

  STATIC_ASSERT(static_cast<int>(RootIndex::kUndefinedValue) ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kTheHoleValue) ==
                Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kNullValue) ==
                Internals::kNullValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kTrueValue) ==
                Internals::kTrueValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kFalseValue) ==
                Internals::kFalseValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kempty_string) ==
                Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  static int GetMaximumFillToAlign(AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  static int GetFillToAlign(Address address, AllocationAlignment alignment);
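
  // Illustrative example (hedged): on a 32-bit target, an address that is
  // 4 mod 8 needs one 4-byte filler word to satisfy kDoubleAligned, so
  // GetFillToAlign(addr, kDoubleAligned) returns 4 there and 0 for addresses
  // that are already 8-byte aligned; the maximum over all addresses is what
  // GetMaximumFillToAlign(kDoubleAligned) reports.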

  void FatalProcessOutOfMemory(const char* location);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Zapping is needed for heap verification and is always done in debug
  // builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  static uintptr_t ZapValue() {
    return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
  }

  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
    return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
  }

  static inline GarbageCollector YoungGenerationCollector() {
#if ENABLE_MINOR_MC
    return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
#else
    return SCAVENGER;
#endif  // ENABLE_MINOR_MC
  }

  static inline const char* CollectorName(GarbageCollector collector) {
    switch (collector) {
      case SCAVENGER:
        return "Scavenger";
      case MARK_COMPACTOR:
        return "Mark-Compact";
      case MINOR_MARK_COMPACTOR:
        return "Minor Mark-Compact";
    }
    return "Unknown collector";
  }

  // Copy block of memory from src to dst. The size of the block must be
  // aligned to the pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
  V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
                                                        Address slot,
                                                        HeapObject value);
  V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
      Heap* heap, FixedArray array, int offset, int length);
  V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
      Code host, RelocInfo* rinfo, HeapObject value);
  V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object,
                                                   Address slot,
                                                   HeapObject value);
  V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
      Heap* heap, HeapObject object);
  V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
                                                          RelocInfo* rinfo,
                                                          HeapObject value);
  V8_EXPORT_PRIVATE static void MarkingBarrierForDescriptorArraySlow(
      Heap* heap, HeapObject host, HeapObject descriptor_array,
      int number_of_own_descriptors);
  V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  void NotifyBootstrapComplete();

  void NotifyOldGenerationExpansion();

  inline Address* NewSpaceAllocationTopAddress();
  inline Address* NewSpaceAllocationLimitAddress();
  inline Address* OldSpaceAllocationTopAddress();
  inline Address* OldSpaceAllocationLimitAddress();

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray array, int dst_index, int src_index, int len,
                    WriteBarrierMode mode = UPDATE_WRITE_BARRIER);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If slots could have been recorded in
  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
  // pass ClearRecordedSlots::kNo. If the memory after the object header of
  // the filler should be cleared, pass in kClearFreedMemory. The default is
  // kDontClearFreedMemory.
  V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
      Address addr, int size, ClearRecordedSlots clear_slots_mode,
      ClearFreedMemoryMode clear_memory_mode =
          ClearFreedMemoryMode::kDontClearFreedMemory);
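
  // A usage sketch (hedged): when freeing a gap in old space where old-to-new
  // slots may have been recorded, callers would clear them; a freshly
  // allocated, never-recorded region can pass ClearRecordedSlots::kNo instead.
  //
  //   heap->CreateFillerObjectAt(free_start, free_size_in_bytes,
  //                              ClearRecordedSlots::kYes);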

  template <typename T>
  void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);

  bool CanMoveObjectStart(HeapObject object);

  bool IsImmovable(HeapObject object);

  static bool IsLargeObject(HeapObject object);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj, int elements_to_trim);

  // Trim the given array from the right.
  void RightTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
  void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);

  // Converts the given boolean condition to JavaScript boolean value.
  inline Oddball ToBoolean(bool condition);

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object object) {
    native_contexts_list_ = object;
  }
  Object native_contexts_list() const { return native_contexts_list_; }

  void set_allocation_sites_list(Object object) {
    allocation_sites_list_ = object;
  }
  Object allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Address allocation_sites_list_address() {
    return reinterpret_cast<Address>(&allocation_sites_list_);
  }

  // Traverses all the allocation sites in the list [following nested_site and
  // weak_next] and calls the visitor for each.
  void ForeachAllocationSite(
      Object list, const std::function<void(AllocationSite)>& visitor);

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  bool AllowedToBeMigrated(HeapObject object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  bool write_protect_code_memory() const { return write_protect_code_memory_; }

  uintptr_t code_space_memory_modification_scope_depth() {
    return code_space_memory_modification_scope_depth_;
  }

  void increment_code_space_memory_modification_scope_depth() {
    code_space_memory_modification_scope_depth_++;
  }

  void decrement_code_space_memory_modification_scope_depth() {
    code_space_memory_modification_scope_depth_--;
  }

  void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
  void UnprotectAndRegisterMemoryChunk(HeapObject object);
  void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
  V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();

  void EnableUnprotectedMemoryChunksRegistry() {
    unprotected_memory_chunks_registry_enabled_ = true;
  }

  void DisableUnprotectedMemoryChunksRegistry() {
    unprotected_memory_chunks_registry_enabled_ = false;
  }

  bool unprotected_memory_chunks_registry_enabled() {
    return unprotected_memory_chunks_registry_enabled_;
  }

  inline HeapState gc_state() { return gc_state_; }
  void SetGCState(HeapState state);
  bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return a null AllocationMemento.
  template <FindMementoMode mode>
  inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  void MemoryPressureNotification(MemoryPressureLevel level,
                                  bool is_isolate_locked);
  void CheckMemoryPressure();

  void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
  void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                   size_t heap_limit);
  void AutomaticallyRestoreInitialHeapLimit(double threshold_percent);

  double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check new space expansion criteria and expand semispaces if they were hit.
  void CheckNewSpaceExpansionCriteria();

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address);

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  inline int NextScriptId();
  inline int NextDebuggingId();
  inline int GetNextTemplateSerialNumber();

  void SetSerializedObjects(FixedArray objects);
  void SetSerializedGlobalProxySizes(FixedArray sizes);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }

  V8_INLINE int64_t external_memory();
  V8_INLINE void update_external_memory(int64_t delta);
  V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed);
  V8_INLINE void account_external_memory_concurrently_freed();

  size_t backing_store_bytes() const { return backing_store_bytes_; }

  void CompactWeakArrayLists(PretenureFlag pretenure);

  void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by the runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new.
  inline void OnAllocationEvent(HeapObject object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);

  inline bool CanAllocateInReadOnlySpace();
  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(size_t used, size_t committed);

  void ActivateMemoryReducerIfNeeded();

  bool ShouldOptimizeForMemoryUsage();

  bool HighMemoryPressure() {
    return memory_pressure_level_ != MemoryPressureLevel::kNone;
  }

  void RestoreHeapLimit(size_t heap_limit) {
    // Do not set the limit lower than the live size + some slack.
    size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
    max_old_generation_size_ =
        Min(max_old_generation_size_, Max(heap_limit, min_limit));
  }
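
  // Worked example (illustrative numbers): with 80 MB of live objects,
  // min_limit is 80 MB + 20 MB = 100 MB, so a request to restore a 64 MB
  // limit is clamped up to 100 MB (and never above the current
  // max_old_generation_size_).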

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configure heap sizes
  // max_semi_space_size_in_kb: maximum semi-space size in KB
  // max_old_generation_size_in_mb: maximum old generation size in MB
  // code_range_size_in_mb: code range size in MB
  void ConfigureHeap(size_t max_semi_space_size_in_kb,
                     size_t max_old_generation_size_in_mb,
                     size_t code_range_size_in_mb);
  void ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  void SetUp();

  // (Re-)Initialize hash seed from flag or RNG.
  void InitializeHashSeed();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Create ObjectStats if live_object_stats_ or dead_object_stats_ are
  // nullptr.
  void CreateObjectStats();

  // Sets the TearDown state, so no new GC tasks get posted.
  void StartTearDown();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  inline Address NewSpaceTop();

  NewSpace* new_space() { return new_space_; }
  OldSpace* old_space() { return old_space_; }
  CodeSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }
  CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
  NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
  ReadOnlySpace* read_only_space() { return read_only_space_; }

  inline PagedSpace* paged_space(int idx);
  inline Space* space(int idx);

  // Returns name of the space.
  const char* GetSpaceName(int idx);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_.get(); }

  MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_.get();
  }

  MinorMarkCompactCollector* minor_mark_compact_collector() {
    return minor_mark_compact_collector_;
  }

  ArrayBufferCollector* array_buffer_collector() {
    return array_buffer_collector_.get();
  }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

  // Shortcut to the roots table stored in the Isolate.
  V8_INLINE RootsTable& roots_table();

// Heap root getters.
#define ROOT_ACCESSOR(type, name, CamelName) inline type name();
  MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  V8_INLINE void SetRootMaterializedObjects(FixedArray objects);
  V8_INLINE void SetRootScriptList(Object value);
  V8_INLINE void SetRootStringTable(StringTable value);
  V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value);
  V8_INLINE void SetMessageListeners(TemplateList value);

  // Set the stack limit in the roots table. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // The stack limit is thread-dependent. To be able to reproduce the same
  // snapshot blob, we need to reset it before serializing.
  void ClearStackLimits();

  void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
  void UnregisterStrongRoots(FullObjectSlot start);

  void SetBuiltinsConstantsTable(FixedArray cache);

  // A full copy of the interpreter entry trampoline, used as a template to
  // create copies of the builtin at runtime. The copies are used to create
  // better profiling information for ticks in bytecode execution. Note that
  // this is always a copy of the full builtin, i.e. not the off-heap
  // trampoline.
  // See also: FLAG_interpreted_frames_native_stack.
  void SetInterpreterEntryTrampolineForProfiling(Code code);

  // Add finalization_group into the dirty_js_finalization_groups list.
  void AddDirtyJSFinalizationGroup(
      JSFinalizationGroup finalization_group,
      std::function<void(HeapObject object, ObjectSlot slot, Object target)>
          gc_notify_updated_slot);

  void AddKeepDuringJobTarget(Handle<JSReceiver> target);
  void ClearKeepDuringJobSet();

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  V8_EXPORT_PRIVATE bool CollectGarbage(
      AllocationSpace space, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection.
  V8_EXPORT_PRIVATE void CollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);

  // Precise garbage collection that potentially finalizes already running
  // incremental marking before performing an atomic garbage collection.
  // Only use if absolutely necessary or in tests to avoid floating garbage!
  void PreciseCollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure();

  typedef v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback
      GetExternallyAllocatedMemoryInBytesCallback;

  void SetGetExternallyAllocatedMemoryInBytesCallback(
      GetExternallyAllocatedMemoryInBytesCallback callback) {
    external_memory_callback_ = callback;
  }

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Builtins. =================================================================
  // ===========================================================================

  Code builtin(int index);
  Address builtin_address(int index);
  void set_builtin(int index, Code builtin);

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // None of these methods iterate over the read-only roots. To do this use
  // ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for
  // garbage collection and is usually only performed as part of
  // (de)serialization or heap verification.

  // Iterates over the strong roots and the weak roots.
  void IterateRoots(RootVisitor* v, VisitMode mode);
  // Iterates over the strong roots.
  void IterateStrongRoots(RootVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(RootVisitor* v);
  // Iterates over weak string tables.
  void IterateWeakRoots(RootVisitor* v, VisitMode mode);
  // Iterates over weak global handles.
  void IterateWeakGlobalHandles(RootVisitor* v);
  // Iterates over builtins.
  void IterateBuiltins(RootVisitor* v);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Used to query the incremental marking status from generated code.
  Address* IsMarkingFlagAddress() {
    return reinterpret_cast<Address*>(&is_marking_flag_);
  }

  void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }

  Address* store_buffer_top_address();
  static intptr_t store_buffer_mask_constant();
  static Address store_buffer_overflow_function_address();

  void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
  void ClearRecordedSlotRange(Address start, Address end);

#ifdef DEBUG
  void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
#endif

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  int GCFlagsForIncrementalMarking() {
    return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
                                          : kNoGCFlags;
  }

  // Start incremental marking and ensure that idle time handler can perform
  // incremental steps.
  void StartIdleIncrementalMarking(
      GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  void StartIncrementalMarking(
      int gc_flags, GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void StartIncrementalMarkingIfAllocationLimitIsReached(
      int gc_flags,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
  // Synchronously finalizes incremental marking.
  void FinalizeIncrementalMarkingAtomically(GarbageCollectionReason gc_reason);

  void RegisterDeserializedObjectsForBlackAllocation(
      Reservation* reservations, const std::vector<HeapObject>& large_objects,
      const std::vector<Address>& maps);

  IncrementalMarking* incremental_marking() {
    return incremental_marking_.get();
  }

  // ===========================================================================
  // Concurrent marking API. ===================================================
  // ===========================================================================

  ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); }

  // The runtime uses this function to notify the GC of potentially unsafe
  // object layout changes that require special synchronization with the
  // concurrent marker. The old size is the size of the object before the
  // layout change.
  void NotifyObjectLayoutChange(HeapObject object, int old_size,
                                const DisallowHeapAllocation&);

#ifdef VERIFY_HEAP
  // This function checks that either
  // - the map transition is safe,
  // - or it was communicated to GC using NotifyObjectLayoutChange.
  void VerifyObjectLayoutChange(HeapObject object, Map new_map);
#endif

  // ===========================================================================
  // Deoptimization support API. ===============================================
  // ===========================================================================

  // Setters for code offsets of well-known deoptimization targets.
  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  void SetConstructStubCreateDeoptPCOffset(int pc_offset);
  void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
  void SetInterpreterEntryReturnPCOffset(int pc_offset);

  // Invalidates references in the given {code} object that are referenced
  // transitively from the deoptimization data. Mutates write-protected code.
  void InvalidateCodeDeoptimizationData(Code code);

  void DeoptMarkedAllocationSites();

  bool DeoptMaybeTenuredAllocationSites();

  // ===========================================================================
  // Embedder heap tracer support. =============================================
  // ===========================================================================

  LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
    return local_embedder_heap_tracer_.get();
  }

  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
  EmbedderHeapTracer* GetEmbedderHeapTracer() const;

  void RegisterExternallyReferencedObject(Address* location);
  void SetEmbedderStackStateForNextFinalizaton(
      EmbedderHeapTracer::EmbedderStackState stack_state);

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String string);

  // Called when a string's resource is changed. The old and new payload sizes
  // are passed as arguments.
  void UpdateExternalString(String string, size_t old_payload,
                            size_t new_payload);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String string);

  static String UpdateYoungReferenceInExternalStringTableEntry(
      Heap* heap, FullObjectSlot pointer);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ===========
  // ===========================================================================

  // Returns whether the object resides in new space.
  static inline bool InYoungGeneration(Object object);
  static inline bool InYoungGeneration(MaybeObject object);
  static inline bool InYoungGeneration(HeapObject heap_object);
  static inline bool InFromPage(Object object);
  static inline bool InFromPage(MaybeObject object);
  static inline bool InFromPage(HeapObject heap_object);
  static inline bool InToPage(Object object);
  static inline bool InToPage(MaybeObject object);
  static inline bool InToPage(HeapObject heap_object);

  // Returns whether the object resides in old space.
  inline bool InOldSpace(Object object);

  // Returns whether the object resides in read-only space.
  inline bool InReadOnlySpace(Object object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(HeapObject value);

  // Checks whether an address/object is in a given space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(HeapObject value, AllocationSpace space);

  // Slow methods that can be used for verification as they can also be used
  // with off-heap Addresses.
  bool InSpaceSlow(Address addr, AllocationSpace space);

  static inline Heap* FromWritableHeapObject(const HeapObject obj);

  // ===========================================================================
  // Object statistics tracking. ===============================================
  // ===========================================================================

  // Returns the number of buckets used by object statistics tracking during a
  // major GC. Note that the following methods fail gracefully when the bounds
  // are exceeded.
  size_t NumberOfTrackedHeapObjectTypes();

  // Returns object statistics about count and size at the last major GC.
  // Objects are being grouped into buckets that roughly resemble existing
  // instance types.
  size_t ObjectCountAtLastGC(size_t index);
  size_t ObjectSizeAtLastGC(size_t index);

  // Retrieves names of buckets used by object statistics tracking.
  bool GetObjectTypeName(size_t index, const char** object_type,
                         const char** object_sub_type);

  // The total number of native context objects on the heap.
  size_t NumberOfNativeContexts();
  // The total number of native contexts that were detached but were not
  // garbage collected yet.
  size_t NumberOfDetachedContexts();

  // ===========================================================================
  // Code statistics. ==========================================================
  // ===========================================================================

  // Collect code (Code and BytecodeArray objects) statistics.
  void CollectCodeStatistics();

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  // Returns the maximum amount of memory reserved for the heap.
  size_t MaxReserved();
  size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
  size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
  size_t MaxOldGenerationSize() { return max_old_generation_size_; }

  V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize(
      uint64_t physical_memory);

  static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
    const uint64_t min_physical_memory = 512 * MB;
    const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB);

    uint64_t capped_physical_memory =
        Max(Min(physical_memory, max_physical_memory), min_physical_memory);
    // Linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C.
    size_t semi_space_size_in_kb =
        static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
                             (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
                                (max_physical_memory - min_physical_memory) +
                            kMinSemiSpaceSizeInKB);
    return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
  }
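
  // Worked example (hedged; assumes the 512 KB / 8192 KB bounds sketched
  // near the top of this class): for 1 GB of physical memory the
  // interpolation fraction is (1024 - 512) / (3072 - 512) = 0.2, giving
  // 0.2 * (8192 - 512) + 512 = 2048 KB of max semi-space, which is already
  // a multiple of the page size.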

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  size_t Capacity();

  // Returns the capacity of the old generation.
  size_t OldGenerationCapacity();

  // Returns the amount of memory currently held alive by the unmapper.
  size_t CommittedMemoryOfUnmapper();

  // Returns the amount of memory currently committed for the heap.
  size_t CommittedMemory();

  // Returns the amount of memory currently committed for the old space.
  size_t CommittedOldGenerationMemory();

  // Returns the amount of executable memory currently committed for the heap.
  size_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  size_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  size_t Available();

  // Returns the size of all objects residing in the heap.
  size_t SizeOfObjects();

  void UpdateSurvivalStatistics(int start_new_space_size);

  inline void IncrementPromotedObjectsSize(size_t object_size) {
    promoted_objects_size_ += object_size;
  }
  inline size_t promoted_objects_size() { return promoted_objects_size_; }

  inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
    semi_space_copied_object_size_ += object_size;
  }
  inline size_t semi_space_copied_object_size() {
    return semi_space_copied_object_size_;
  }

  inline size_t SurvivedYoungObjectSize() {
    return promoted_objects_size_ + semi_space_copied_object_size_;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(size_t survived) {
    survived_last_scavenge_ = survived;
    survived_since_last_expansion_ += survived;
  }

  inline uint64_t OldGenerationObjectsAndPromotedExternalMemorySize() {
    return OldGenerationSizeOfObjects() + PromotedExternalMemorySize();
  }

  inline void UpdateNewSpaceAllocationCounter();

  inline size_t NewSpaceAllocationCounter();

  // This should be used only for testing.
  void set_new_space_allocation_counter(size_t new_value) {
    new_space_allocation_counter_ = new_value;
  }

  void UpdateOldGenerationAllocationCounter() {
    old_generation_allocation_counter_at_last_gc_ =
        OldGenerationAllocationCounter();
    old_generation_size_at_last_gc_ = 0;
  }

  size_t OldGenerationAllocationCounter() {
    return old_generation_allocation_counter_at_last_gc_ +
           PromotedSinceLastGC();
  }

  // This should be used only for testing.
  void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
    old_generation_allocation_counter_at_last_gc_ = new_value;
  }

  size_t PromotedSinceLastGC() {
    size_t old_generation_size = OldGenerationSizeOfObjects();
    DCHECK_GE(old_generation_size, old_generation_size_at_last_gc_);
    return old_generation_size - old_generation_size_at_last_gc_;
  }

  // This is called by the sweeper when it discovers more free space
  // than expected at the end of the preceding GC.
  void NotifyRefinedOldGenerationSize(size_t decreased_bytes) {
    if (old_generation_size_at_last_gc_ != 0) {
      // OldGenerationSizeOfObjects() is now smaller by |decreased_bytes|.
      // Adjust old_generation_size_at_last_gc_ too, so that PromotedSinceLastGC
      // continues to increase monotonically, rather than decreasing here.
      DCHECK_GE(old_generation_size_at_last_gc_, decreased_bytes);
      old_generation_size_at_last_gc_ -= decreased_bytes;
    }
  }
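
  // Numeric example (illustrative): if the last GC left a 50 MB baseline and
  // the sweeper later finds 2 MB more free than expected, the baseline drops
  // to 48 MB, so PromotedSinceLastGC() (current size minus baseline) stays
  // monotonic instead of appearing to shrink.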

  int gc_count() const { return gc_count_; }

  bool is_current_gc_forced() const { return is_current_gc_forced_; }

  // Returns the size of objects residing in non-new spaces.
  // Excludes external memory held by those objects.
  size_t OldGenerationSizeOfObjects();

  // ===========================================================================
  // Prologue/epilogue callback methods.========================================
  // ===========================================================================

  void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                             GCType gc_type_filter, void* data);
  void RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                                void* data);

  void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                             GCType gc_type_filter, void* data);
  void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                                void* data);

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Creates a filler object and returns a heap object immediately after it.
  V8_WARN_UNUSED_RESULT HeapObject PrecedeWithFiller(HeapObject object,
                                                     int filler_size);

  // Creates a filler object if needed for alignment and returns a heap object
  // immediately after it. If any space is left after the returned object,
  // another filler object is created so the over allocated memory is iterable.
  V8_WARN_UNUSED_RESULT HeapObject
  AlignWithFiller(HeapObject object, int object_size, int allocation_size,
                  AllocationAlignment alignment);
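
  // Typical pattern (a hedged sketch; AllocateRaw stands in for an allocator
  // returning a HeapObject): over-allocate by the worst-case filler, then let
  // AlignWithFiller carve out an aligned object and fill the leftovers.
  //
  //   int filler = Heap::GetMaximumFillToAlign(alignment);
  //   HeapObject raw = AllocateRaw(object_size + filler, space);
  //   HeapObject aligned = heap->AlignWithFiller(
  //       raw, object_size, object_size + filler, alignment);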
1154 :
1155 : // ===========================================================================
1156 : // ArrayBuffer tracking. =====================================================
1157 : // ===========================================================================
1158 :
1159 : // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
1160 : // in the registration/unregistration APIs. Consider dropping the "New" from
1161 : // "RegisterNewArrayBuffer" because one can re-register a previously
1162 : // unregistered buffer, too, and the name is confusing.
1163 : void RegisterNewArrayBuffer(JSArrayBuffer buffer);
1164 : void UnregisterArrayBuffer(JSArrayBuffer buffer);
1165 :
1166 : // ===========================================================================
1167 : // Allocation site tracking. =================================================
1168 : // ===========================================================================
1169 :
1170 : // Updates the AllocationSite of a given {object}. The entry (including the
1171 : // count) is cached on the local pretenuring feedback.
1172 : inline void UpdateAllocationSite(
1173 : Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);
1174 :
1175 : // Merges local pretenuring feedback into the global one. Note that this
1176 : // method needs to be called after evacuation, as allocation sites may be
1177 : // evacuated and this method resolves forward pointers accordingly.
1178 : void MergeAllocationSitePretenuringFeedback(
1179 : const PretenuringFeedbackMap& local_pretenuring_feedback);
1180 :
1181 : // ===========================================================================
1182 : // Allocation tracking. ======================================================
1183 : // ===========================================================================
1184 :
1185 : // Adds {new_space_observer} to new space and {observer} to any other space.
1186 : void AddAllocationObserversToAllSpaces(
1187 : AllocationObserver* observer, AllocationObserver* new_space_observer);
1188 :
1189 : // Removes {new_space_observer} from new space and {observer} from any other
1190 : // space.
1191 : void RemoveAllocationObserversFromAllSpaces(
1192 : AllocationObserver* observer, AllocationObserver* new_space_observer);
1193 :
1194 : bool allocation_step_in_progress() { return allocation_step_in_progress_; }
1195 : void set_allocation_step_in_progress(bool val) {
1196 44286996 : allocation_step_in_progress_ = val;
1197 : }
1198 :
1199 : // ===========================================================================
1200 : // Heap object allocation tracking. ==========================================
1201 : // ===========================================================================
1202 :
1203 : void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
1204 : void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
1205 : bool has_heap_object_allocation_tracker() const {
1206 : return !allocation_trackers_.empty();
1207 : }
1208 :
1209 : // ===========================================================================
1210 : // Retaining path tracking. ==================================================
1211 : // ===========================================================================
1212 :
1213 : // Adds the given object to the weak table of retaining path targets.
1214 : // On each GC if the marker discovers the object, it will print the retaining
1215 : // path. This requires --track-retaining-path flag.
1216 : void AddRetainingPathTarget(Handle<HeapObject> object,
1217 : RetainingPathOption option);
1218 :
1219 : // ===========================================================================
1220 : // Stack frame support. ======================================================
1221 : // ===========================================================================
1222 :
1223 : // Returns the Code object for a given interior pointer.
1224 : Code GcSafeFindCodeForInnerPointer(Address inner_pointer);
1225 :
1226 : // Returns true if {addr} is contained within {code} and false otherwise.
1227 : // Mostly useful for debugging.
1228 : bool GcSafeCodeContains(Code code, Address addr);
1229 :
1230 : // =============================================================================
1231 : #ifdef VERIFY_HEAP
1232 : // Verify the heap is in its normal state before or after a GC.
1233 : void Verify();
1234 : void VerifyRememberedSetFor(HeapObject object);
1235 : #endif
1236 :
1237 : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1238 : void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
1239 : #endif
1240 :
1241 : #ifdef DEBUG
1242 : void VerifyCountersAfterSweeping();
1243 : void VerifyCountersBeforeConcurrentSweeping();
1244 :
1245 : void Print();
1246 : void PrintHandles();
1247 :
1248 : // Report code statistics.
1249 : void ReportCodeStatistics(const char* title);
1250 : #endif
1251 : void* GetRandomMmapAddr() {
1252 740113 : void* result = v8::internal::GetRandomMmapAddr();
1253 : #if V8_TARGET_ARCH_X64
1254 : #if V8_OS_MACOSX
1255 : // The Darwin kernel [as of macOS 10.12.5] does not clean up page
1256 : // directory entries [PDE] created from mmap or mach_vm_allocate, even
1257 : // after the region is destroyed. Using a virtual address space that is
1258 : // too large causes a leak of about 1 wired [can never be paged out] page
1259 : // per call to mmap(). The page is only reclaimed when the process is
1260 : // killed. Confine the hint to a 32-bit section of the virtual address
1261 : // space. See crbug.com/700928.
1262 : uintptr_t offset =
1263 : reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
1264 : kMmapRegionMask;
1265 : result = reinterpret_cast<void*>(mmap_region_base_ + offset);
1266 : #endif // V8_OS_MACOSX
1267 : #endif // V8_TARGET_ARCH_X64
1268 : return result;
1269 : }
1270 :
1271 : static const char* GarbageCollectionReasonToString(
1272 : GarbageCollectionReason gc_reason);
1273 :
1274 : // Calculates the nof entries for the full sized number to string cache.
1275 : inline int MaxNumberToStringCacheSize() const;
1276 :
1277 : private:
1278 : class SkipStoreBufferScope;
1279 :
1280 : typedef String (*ExternalStringTableUpdaterCallback)(Heap* heap,
1281 : FullObjectSlot pointer);
1282 :
1283 : // External strings table is a place where all external strings are
1284 : // registered. We need to keep track of such strings to properly
1285 : // finalize them.
1286 122068 : class ExternalStringTable {
1287 : public:
1288 61049 : explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
1289 :
1290 : // Registers an external string.
1291 : inline void AddString(String string);
1292 : bool Contains(String string);
1293 :
1294 : void IterateAll(RootVisitor* v);
1295 : void IterateYoung(RootVisitor* v);
1296 : void PromoteYoung();
1297 :
1298 : // Restores internal invariant and gets rid of collected strings. Must be
1299 : // called after each Iterate*() that modified the strings.
1300 : void CleanUpAll();
1301 : void CleanUpYoung();
1302 :
1303 : // Finalize all registered external strings and clear tables.
1304 : void TearDown();
1305 :
1306 : void UpdateYoungReferences(
1307 : Heap::ExternalStringTableUpdaterCallback updater_func);
1308 : void UpdateReferences(
1309 : Heap::ExternalStringTableUpdaterCallback updater_func);
1310 :
1311 : private:
1312 : void Verify();
1313 : void VerifyYoung();
1314 :
1315 : Heap* const heap_;
1316 :
1317 : // To speed up scavenge collections young string are kept separate from old
1318 : // strings.
1319 : std::vector<Object> young_strings_;
1320 : std::vector<Object> old_strings_;
1321 :
1322 : DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
1323 : };
1324 :
1325 : struct StrongRootsList;
1326 :
1327 : struct StringTypeTable {
1328 : InstanceType type;
1329 : int size;
1330 : RootIndex index;
1331 : };
1332 :
1333 : struct ConstantStringTable {
1334 : const char* contents;
1335 : RootIndex index;
1336 : };
1337 :
1338 : struct StructTable {
1339 : InstanceType type;
1340 : int size;
1341 : RootIndex index;
1342 : };
1343 :
1344 8230 : struct GCCallbackTuple {
1345 : GCCallbackTuple(v8::Isolate::GCCallbackWithData callback, GCType gc_type,
1346 : void* data)
1347 69283 : : callback(callback), gc_type(gc_type), data(data) {}
1348 :
1349 : bool operator==(const GCCallbackTuple& other) const;
1350 : GCCallbackTuple& operator=(const GCCallbackTuple& other) V8_NOEXCEPT;
1351 :
1352 : v8::Isolate::GCCallbackWithData callback;
1353 : GCType gc_type;
1354 : void* data;
1355 : };
1356 :
1357 : static const int kInitialStringTableSize = StringTable::kMinCapacity;
1358 : static const int kInitialEvalCacheSize = 64;
1359 : static const int kInitialNumberStringCacheSize = 256;
1360 :
1361 : static const int kRememberedUnmappedPages = 128;
1362 :
1363 : static const StringTypeTable string_type_table[];
1364 : static const ConstantStringTable constant_string_table[];
1365 : static const StructTable struct_table[];
1366 :
1367 : static const int kYoungSurvivalRateHighThreshold = 90;
1368 : static const int kYoungSurvivalRateAllowedDeviation = 15;
1369 : static const int kOldSurvivalRateLowThreshold = 10;
1370 :
1371 : static const int kMaxMarkCompactsInIdleRound = 7;
1372 : static const int kIdleScavengeThreshold = 5;
1373 :
1374 : static const int kInitialFeedbackCapacity = 256;
1375 :
1376 : Heap();
1377 : ~Heap();
1378 :
1379 : // Selects the proper allocation space based on the pretenuring decision.
1380 277335329 : static AllocationSpace SelectSpace(PretenureFlag pretenure) {
1381 277335329 : switch (pretenure) {
1382 : case TENURED_READ_ONLY:
1383 : return RO_SPACE;
1384 : case TENURED:
1385 119276507 : return OLD_SPACE;
1386 : case NOT_TENURED:
1387 157858692 : return NEW_SPACE;
1388 : default:
1389 0 : UNREACHABLE();
1390 : }
1391 : }
1392 :
1393 0 : static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
1394 0 : return 0;
1395 : }
1396 :
1397 : #define ROOT_ACCESSOR(type, name, CamelName) inline void set_##name(type value);
1398 : ROOT_LIST(ROOT_ACCESSOR)
1399 : #undef ROOT_ACCESSOR
1400 :
1401 : StoreBuffer* store_buffer() { return store_buffer_.get(); }
1402 :
1403 : void set_current_gc_flags(int flags) {
1404 119096 : current_gc_flags_ = flags;
1405 : }
1406 :
1407 : inline bool ShouldReduceMemory() const {
1408 1072492 : return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
1409 : }
1410 :
1411 : int NumberOfScavengeTasks();
1412 :
1413 : // Checks whether a global GC is necessary
1414 : GarbageCollector SelectGarbageCollector(AllocationSpace space,
1415 : const char** reason);
1416 :
1417 : // Makes sure there is a filler value behind the top of the new space
1418 : // so that the GC does not confuse some uninitialized/stale memory
1419 : // with the allocation memento of the object at the top.
1420 : void EnsureFillerObjectAtTop();
1421 :
1422 : // Ensure that we have swept all spaces in such a way that we can iterate
1423 : // over all objects. May cause a GC.
1424 : void MakeHeapIterable();
1425 :
1426 : // Performs garbage collection.
1427 : // Returns whether there is a chance that another major GC could
1428 : // collect more garbage.
1429 : bool PerformGarbageCollection(
1430 : GarbageCollector collector,
1431 : const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1432 :
1433 : inline void UpdateOldSpaceLimits();
1434 :
1435 : bool CreateInitialMaps();
1436 : void CreateInternalAccessorInfoObjects();
1437 : void CreateInitialObjects();
1438 :
1439 : // Commits from space if it is uncommitted.
1440 : void EnsureFromSpaceIsCommitted();
1441 :
1442 : // Uncommit unused semi space.
1443 : bool UncommitFromSpace();
1444 :
1445 : // Fill in bogus values in from space
1446 : void ZapFromSpace();
1447 :
1448 : // Zaps the memory of a code object.
1449 : void ZapCodeObject(Address start_address, int size_in_bytes);
1450 :
1451 : // Deopts all code that contains allocation instructions, tenured or not.
1452 : // Moreover, it clears the pretenuring allocation site statistics.
1453 : void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
1454 :
1455 : // Evaluates local pretenuring for the old space and calls
1456 : // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
1457 : // the old space.
1458 : void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
1459 :
1460 : // Record statistics after garbage collection.
1461 : void ReportStatisticsAfterGC();
1462 :
1463 : // Flush the number to string cache.
1464 : void FlushNumberStringCache();
1465 :
1466 : void ConfigureInitialOldGenerationSize();
1467 :
1468 : bool HasLowYoungGenerationAllocationRate();
1469 : bool HasLowOldGenerationAllocationRate();
1470 : double YoungGenerationMutatorUtilization();
1471 : double OldGenerationMutatorUtilization();
1472 :
1473 : void ReduceNewSpaceSize();
1474 :
1475 : GCIdleTimeHeapState ComputeHeapState();
1476 :
1477 : bool PerformIdleTimeAction(GCIdleTimeAction action,
1478 : GCIdleTimeHeapState heap_state,
1479 : double deadline_in_ms);
1480 :
1481 : void IdleNotificationEpilogue(GCIdleTimeAction action,
1482 : GCIdleTimeHeapState heap_state, double start_ms,
1483 : double deadline_in_ms);
1484 :
1485 : int NextAllocationTimeout(int current_timeout = 0);
1486 : inline void UpdateAllocationsHash(HeapObject object);
1487 : inline void UpdateAllocationsHash(uint32_t value);
1488 : void PrintAllocationsHash();
1489 :
1490 : void PrintMaxMarkingLimitReached();
1491 : void PrintMaxNewSpaceSizeReached();
1492 :
1493 : int NextStressMarkingLimit();
1494 :
1495 : void AddToRingBuffer(const char* string);
1496 : void GetFromRingBuffer(char* buffer);
1497 :
1498 : void CompactRetainedMaps(WeakArrayList retained_maps);
1499 :
1500 : void CollectGarbageOnMemoryPressure();
1501 :
1502 : void EagerlyFreeExternalMemory();
1503 :
1504 : bool InvokeNearHeapLimitCallback();
1505 :
1506 : void ComputeFastPromotionMode();
1507 :
1508 : // Attempt to over-approximate the weak closure by marking object groups and
1509 : // implicit references from global handles, but don't atomically complete
1510 : // marking. If we continue to mark incrementally, we might have marked
1511 : // objects that die later.
1512 : void FinalizeIncrementalMarkingIncrementally(
1513 : GarbageCollectionReason gc_reason);
1514 :
1515 : // Returns the timer used for a given GC type.
1516 : // - GCScavenger: young generation GC
1517 : // - GCCompactor: full GC
1518 : // - GCFinalizeMC: finalization of incremental full GC
1519 : // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
1520 : // memory reduction
1521 : TimedHistogram* GCTypeTimer(GarbageCollector collector);
1522 : TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);
1523 :
1524 : // ===========================================================================
1525 : // Pretenuring. ==============================================================
1526 : // ===========================================================================
1527 :
1528 : // Pretenuring decisions are made based on feedback collected during new space
1529 : // evacuation. Note that between feedback collection and calling this
1530 : // method, objects in old space must not move.
1531 : void ProcessPretenuringFeedback();
1532 :
1533 : // Removes an entry from the global pretenuring storage.
1534 : void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
1535 :
1536 : // ===========================================================================
1537 : // Actual GC. ================================================================
1538 : // ===========================================================================
1539 :
1540 : // Code that should be run before and after each GC. Includes some
1541 : // reporting/verification activities when compiled with DEBUG set.
1542 : void GarbageCollectionPrologue();
1543 : void GarbageCollectionEpilogue();
1544 :
1545 : // Performs a major collection in the whole heap.
1546 : void MarkCompact();
1547 : // Performs a minor collection of just the young generation.
1548 : void MinorMarkCompact();
1549 :
1550 : // Code to be run before and after mark-compact.
1551 : void MarkCompactPrologue();
1552 : void MarkCompactEpilogue();
1553 :
1554 : // Performs a minor collection in the new generation.
1555 : void Scavenge();
1556 : void EvacuateYoungGeneration();
1557 :
1558 : void UpdateYoungReferencesInExternalStringTable(
1559 : ExternalStringTableUpdaterCallback updater_func);
1560 :
1561 : void UpdateReferencesInExternalStringTable(
1562 : ExternalStringTableUpdaterCallback updater_func);
1563 :
1564 : void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
1565 : void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
1566 : void ProcessNativeContexts(WeakObjectRetainer* retainer);
1567 : void ProcessAllocationSites(WeakObjectRetainer* retainer);
1568 : void ProcessWeakListRoots(WeakObjectRetainer* retainer);
1569 :
1570 : // ===========================================================================
1571 : // GC statistics. ============================================================
1572 : // ===========================================================================
1573 :
1574 237464 : inline size_t OldGenerationSpaceAvailable() {
1575 474928 : if (old_generation_allocation_limit_ <=
1576 237464 : OldGenerationObjectsAndPromotedExternalMemorySize())
1577 : return 0;
1578 235183 : return old_generation_allocation_limit_ -
1579 : static_cast<size_t>(
1580 235183 : OldGenerationObjectsAndPromotedExternalMemorySize());
1581 : }
1582 :
1583 : // We allow incremental marking to overshoot the allocation limit for
1584 : // performance reasons. If the overshoot is too large then we are more
1585 : // eager to finalize incremental marking.
1586 1927 : inline bool AllocationLimitOvershotByLargeMargin() {
1587 : // This guards against too eager finalization in small heaps.
1588 : // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
1589 : size_t kMarginForSmallHeaps = 32u * MB;
1590 3854 : if (old_generation_allocation_limit_ >=
1591 1927 : OldGenerationObjectsAndPromotedExternalMemorySize())
1592 : return false;
1593 1661 : uint64_t overshoot = OldGenerationObjectsAndPromotedExternalMemorySize() -
1594 1661 : old_generation_allocation_limit_;
1595 : // The overshoot margin is 50% of the allocation limit or half-way to the
1596 : // max heap size, whichever is smaller, with special handling of small heaps.
1597 : uint64_t margin =
1598 : Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
1599 1661 : (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
1600 1661 : return overshoot >= margin;
1601 : }
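 :
 : // A worked example with hypothetical numbers: given a 128 MB allocation
 : // limit and a 256 MB max old generation size, the margin is
 : // Min(Max(128 MB / 2, 32 MB), (256 MB - 128 MB) / 2) == 64 MB, so marking
 : // is finalized eagerly once the overshoot reaches 64 MB.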
1602 :
1603 : void UpdateTotalGCTime(double duration);
1604 :
1605 98000 : bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
1606 :
1607 : bool IsIneffectiveMarkCompact(size_t old_generation_size,
1608 : double mutator_utilization);
1609 : void CheckIneffectiveMarkCompact(size_t old_generation_size,
1610 : double mutator_utilization);
1611 :
1612 : inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1613 : size_t amount);
1614 :
1615 : inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1616 : size_t amount);
1617 :
1618 : // ===========================================================================
1619 : // Growing strategy. =========================================================
1620 : // ===========================================================================
1621 :
1622 : HeapController* heap_controller() { return heap_controller_.get(); }
1623 : MemoryReducer* memory_reducer() { return memory_reducer_.get(); }
1624 :
1625 : // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
1626 : // This constant limits the effect of load RAIL mode on GC.
1627 : // The value is arbitrary and chosen as the largest load time observed in
1628 : // v8 browsing benchmarks.
1629 : static const int kMaxLoadTimeMs = 7000;
1630 :
1631 : bool ShouldOptimizeForLoadTime();
1632 :
1633 : size_t old_generation_allocation_limit() const {
1634 : return old_generation_allocation_limit_;
1635 : }
1636 :
1637 : bool always_allocate() { return always_allocate_scope_count_ != 0; }
1638 :
1639 : bool CanExpandOldGeneration(size_t size);
1640 :
1641 : bool ShouldExpandOldGenerationOnSlowAllocation();
1642 :
1643 : enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };
1644 :
1645 : HeapGrowingMode CurrentHeapGrowingMode();
1646 :
1647 : enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
1648 : IncrementalMarkingLimit IncrementalMarkingLimitReached();
1649 :
1650 : // ===========================================================================
1651 : // Idle notification. ========================================================
1652 : // ===========================================================================
1653 :
1654 : bool RecentIdleNotificationHappened();
1655 : void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
1656 :
1657 : // ===========================================================================
1658 : // HeapIterator helpers. =====================================================
1659 : // ===========================================================================
1660 :
1661 7585 : void heap_iterator_start() { heap_iterator_depth_++; }
1662 :
1663 7585 : void heap_iterator_end() { heap_iterator_depth_--; }
1664 :
1665 : bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
1666 :
1667 : // ===========================================================================
1668 : // Allocation methods. =======================================================
1669 : // ===========================================================================
1670 :
1671 : // Allocates a JS Map in the heap.
1672 : V8_WARN_UNUSED_RESULT AllocationResult
1673 : AllocateMap(InstanceType instance_type, int instance_size,
1674 : ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
1675 : int inobject_properties = 0);
1676 :
1677 : // Allocate an uninitialized object. The memory is non-executable if the
1678 : // hardware and OS allow. This is the single choke-point for allocations
1679 : // performed by the runtime and should not be bypassed (to extend this to
1680 : // inlined allocations, use the Heap::DisableInlineAllocation() support).
1681 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
1682 : int size_in_bytes, AllocationSpace space,
1683 :       AllocationAlignment alignment = kWordAligned);
1684 :
1685 : // This method will try to perform an allocation of a given size in a given
1686 : // space. If the allocation fails, a regular full garbage collection is
1687 : // triggered and the allocation is retried. This is performed multiple times.
1688 : // If after that retry procedure the allocation still fails, a null
1689 : // HeapObject is returned.
1690 : HeapObject AllocateRawWithLightRetry(
1691 : int size, AllocationSpace space,
1692 : AllocationAlignment alignment = kWordAligned);
1693 :
1694 : // This method will try to perform an allocation of a given size in a given
1695 : // space. If the allocation fails, a regular full garbage collection is
1696 : // triggered and the allocation is retried. This is performed multiple times.
1697 : // If after that retry procedure the allocation still fails, a "hammer"
1698 : // garbage collection is triggered, which tries to significantly reduce
1699 : // memory. If the allocation still fails after that, a fatal error is thrown.
1700 : HeapObject AllocateRawWithRetryOrFail(
1701 : int size, AllocationSpace space,
1702 : AllocationAlignment alignment = kWordAligned);
1703 : HeapObject AllocateRawCodeInLargeObjectSpace(int size);
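 :
 : // A sketch of the light-retry strategy described above (assumed shape; the
 : // actual definitions live in heap.cc):
 : //
 : //   HeapObject Heap::AllocateRawWithLightRetry(
 : //       int size, AllocationSpace space, AllocationAlignment alignment) {
 : //     HeapObject result;
 : //     if (AllocateRaw(size, space, alignment).To(&result)) return result;
 : //     for (int i = 0; i < 2; i++) {  // Two GC-and-retry rounds.
 : //       CollectGarbage(space, GarbageCollectionReason::kAllocationFailure);
 : //       if (AllocateRaw(size, space, alignment).To(&result)) return result;
 : //     }
 : //     return HeapObject();  // Null object; callers must check.
 : //   }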
1704 :
1705 : // Allocates a heap object based on the map.
1706 : V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
1707 : AllocationSpace space);
1708 :
1709 : // Takes a code object and checks if it is in memory which is not subject to
1710 : // compaction. This method will return a new code object on an immovable
1711 : // memory location if the original code object was movable.
1712 : HeapObject EnsureImmovableCode(HeapObject heap_object, int object_size);
1713 :
1714 : // Allocates a partial map for bootstrapping.
1715 : V8_WARN_UNUSED_RESULT AllocationResult
1716 : AllocatePartialMap(InstanceType instance_type, int instance_size);
1717 :
1718 : void FinalizePartialMap(Map map);
1719 :
1720 : // Allocate empty fixed typed array of given type.
1721 : V8_WARN_UNUSED_RESULT AllocationResult
1722 : AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
1723 :
1724 75 : void set_force_oom(bool value) { force_oom_ = value; }
1725 :
1726 : // ===========================================================================
1727 : // Retaining path tracing ====================================================
1728 : // ===========================================================================
1729 :
1730 : void AddRetainer(HeapObject retainer, HeapObject object);
1731 : void AddEphemeronRetainer(HeapObject retainer, HeapObject object);
1732 : void AddRetainingRoot(Root root, HeapObject object);
1733 : // Returns true if the given object is a target of retaining path tracking.
1734 : // Stores the option corresponding to the object in the provided *option.
1735 : bool IsRetainingPathTarget(HeapObject object, RetainingPathOption* option);
1736 : void PrintRetainingPath(HeapObject object, RetainingPathOption option);
1737 :
1738 : #ifdef DEBUG
1739 : void IncrementObjectCounters();
1740 : #endif // DEBUG
1741 :
1742 : // The amount of memory that has been freed concurrently.
1743 : std::atomic<intptr_t> external_memory_concurrently_freed_{0};
1744 :
1745 : // This can be calculated directly from a pointer to the heap; however, it is
1746 : // more expedient to get at the isolate directly from within Heap methods.
1747 : Isolate* isolate_ = nullptr;
1748 :
1749 : size_t code_range_size_ = 0;
1750 : size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
1751 : size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
1752 : size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
1753 : size_t initial_max_old_generation_size_;
1754 : size_t initial_max_old_generation_size_threshold_;
1755 : size_t initial_old_generation_size_;
1756 : bool old_generation_size_configured_ = false;
1757 : size_t maximum_committed_ = 0;
1758 : size_t old_generation_capacity_after_bootstrap_ = 0;
1759 :
1760 : // Backing store bytes (array buffers and external strings).
1761 : std::atomic<size_t> backing_store_bytes_{0};
1762 :
1763 : // For keeping track of how much data has survived
1764 : // scavenge since last new space expansion.
1765 : size_t survived_since_last_expansion_ = 0;
1766 :
1767 : // ... and since the last scavenge.
1768 : size_t survived_last_scavenge_ = 0;
1769 :
1770 : // This is not the depth of nested AlwaysAllocateScopes but rather a single
1771 : // count, as scopes can be acquired from multiple tasks (read: threads).
1772 : std::atomic<size_t> always_allocate_scope_count_{0};
1773 :
1774 : // Stores the memory pressure level that was set by
1775 : // MemoryPressureNotification and reset by a mark-compact garbage collection.
1776 : std::atomic<MemoryPressureLevel> memory_pressure_level_;
1777 :
1778 : std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
1779 : near_heap_limit_callbacks_;
1780 :
1781 : // For keeping track of context disposals.
1782 : int contexts_disposed_ = 0;
1783 :
1784 : // The length of the retained_maps array at the time of context disposal.
1785 : // This separates maps in the retained_maps array that were created before
1786 : // and after context disposal.
1787 : int number_of_disposed_maps_ = 0;
1788 :
1789 : NewSpace* new_space_ = nullptr;
1790 : OldSpace* old_space_ = nullptr;
1791 : CodeSpace* code_space_ = nullptr;
1792 : MapSpace* map_space_ = nullptr;
1793 : LargeObjectSpace* lo_space_ = nullptr;
1794 : CodeLargeObjectSpace* code_lo_space_ = nullptr;
1795 : NewLargeObjectSpace* new_lo_space_ = nullptr;
1796 : ReadOnlySpace* read_only_space_ = nullptr;
1797 : // Map from the space id to the space.
1798 : Space* space_[LAST_SPACE + 1];
1799 :
1800 : // Determines whether code space is write-protected. This is essentially a
1801 : // race-free copy of the {FLAG_write_protect_code_memory} flag.
1802 : bool write_protect_code_memory_ = false;
1803 :
1804 : // Holds the number of open CodeSpaceMemoryModificationScopes.
1805 : uintptr_t code_space_memory_modification_scope_depth_ = 0;
1806 :
1807 : HeapState gc_state_ = NOT_IN_GC;
1808 :
1809 : int gc_post_processing_depth_ = 0;
1810 :
1811 : // Returns the amount of external memory registered since the last global GC.
1812 : uint64_t PromotedExternalMemorySize();
1813 :
1814 : // How many "runtime allocations" happened.
1815 : uint32_t allocations_count_ = 0;
1816 :
1817 : // Running hash over allocations performed.
1818 : uint32_t raw_allocations_hash_ = 0;
1819 :
1820 : // Starts marking when stress_marking_percentage_% of the marking start limit
1821 : // is reached.
1822 : int stress_marking_percentage_ = 0;
1823 :
1824 : // Observer that causes more frequent checks of whether the incremental
1825 : // marking limit has been reached.
1826 : AllocationObserver* stress_marking_observer_ = nullptr;
1827 :
1828 : // Observer that can cause early scavenge start.
1829 : StressScavengeObserver* stress_scavenge_observer_ = nullptr;
1830 :
1831 : bool allocation_step_in_progress_ = false;
1832 :
1833 : // The maximum percent of the marking limit reached without causing marking.
1834 : // This is tracked when specifying --fuzzer-gc-analysis.
1835 : double max_marking_limit_reached_ = 0.0;
1836 :
1837 : // How many mark-sweep collections happened.
1838 : unsigned int ms_count_ = 0;
1839 :
1840 : // How many GCs happened.
1841 : unsigned int gc_count_ = 0;
1842 :
1843 : // The number of Mark-Compact garbage collections that are considered as
1844 : // ineffective. See IsIneffectiveMarkCompact() predicate.
1845 : int consecutive_ineffective_mark_compacts_ = 0;
1846 :
1847 : static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
1848 : uintptr_t mmap_region_base_ = 0;
1849 :
1850 : // For post-mortem debugging.
1851 : int remembered_unmapped_pages_index_ = 0;
1852 : Address remembered_unmapped_pages_[kRememberedUnmappedPages];
1853 :
1854 : // Limit that triggers a global GC on the next (normally caused) GC. This
1855 : // is checked when we have already decided to do a GC to help determine
1856 : // which collector to invoke, before expanding a paged space in the old
1857 : // generation and on every allocation in large object space.
1858 : size_t old_generation_allocation_limit_;
1859 :
1860 : // Indicates that inline bump-pointer allocation has been globally disabled
1861 : // for all spaces. This is used to disable allocations in generated code.
1862 : bool inline_allocation_disabled_ = false;
1863 :
1864 : // Weak list heads, threaded through the objects.
1865 : // List heads are initialized lazily and contain the undefined_value at start.
1866 : Object native_contexts_list_;
1867 : Object allocation_sites_list_;
1868 :
1869 : std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
1870 : std::vector<GCCallbackTuple> gc_prologue_callbacks_;
1871 :
1872 : GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;
1873 :
1874 : int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
1875 :
1876 : size_t promoted_objects_size_ = 0;
1877 : double promotion_ratio_ = 0.0;
1878 : double promotion_rate_ = 0.0;
1879 : size_t semi_space_copied_object_size_ = 0;
1880 : size_t previous_semi_space_copied_object_size_ = 0;
1881 : double semi_space_copied_rate_ = 0.0;
1882 : int nodes_died_in_new_space_ = 0;
1883 : int nodes_copied_in_new_space_ = 0;
1884 : int nodes_promoted_ = 0;
1885 :
1886 : // This is the pretenuring trigger for allocation sites that are in the
1887 : // maybe-tenure state. When we switch to the maximum new space size, we
1888 : // deoptimize the code that belongs to the allocation site and derive the
1889 : // lifetime of the allocation site.
1890 : unsigned int maximum_size_scavenges_ = 0;
1891 :
1892 : // Total time spent in GC.
1893 : double total_gc_time_ms_;
1894 :
1895 : // Last time an idle notification happened.
1896 : double last_idle_notification_time_ = 0.0;
1897 :
1898 : // Last time a garbage collection happened.
1899 : double last_gc_time_ = 0.0;
1900 :
1901 : std::unique_ptr<GCTracer> tracer_;
1902 : std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
1903 : MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
1904 : std::unique_ptr<ScavengerCollector> scavenger_collector_;
1905 : std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
1906 : std::unique_ptr<MemoryAllocator> memory_allocator_;
1907 : std::unique_ptr<StoreBuffer> store_buffer_;
1908 : std::unique_ptr<HeapController> heap_controller_;
1909 : std::unique_ptr<IncrementalMarking> incremental_marking_;
1910 : std::unique_ptr<ConcurrentMarking> concurrent_marking_;
1911 : std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
1912 : std::unique_ptr<MemoryReducer> memory_reducer_;
1913 : std::unique_ptr<ObjectStats> live_object_stats_;
1914 : std::unique_ptr<ObjectStats> dead_object_stats_;
1915 : std::unique_ptr<ScavengeJob> scavenge_job_;
1916 : std::unique_ptr<AllocationObserver> idle_scavenge_observer_;
1917 : std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
1918 : StrongRootsList* strong_roots_list_ = nullptr;
1919 :
1920 : // This counter is increased before each GC and never reset.
1921 : // To account for the bytes allocated since the last GC, use the
1922 : // NewSpaceAllocationCounter() function.
1923 : size_t new_space_allocation_counter_ = 0;
1924 :
1925 : // This counter is increased before each GC and never reset. To
1926 : // account for the bytes allocated since the last GC, use the
1927 : // OldGenerationAllocationCounter() function.
1928 : size_t old_generation_allocation_counter_at_last_gc_ = 0;
1929 :
1930 : // The size of objects in old generation after the last MarkCompact GC.
1931 : size_t old_generation_size_at_last_gc_ = 0;
1932 :
1933 : // The feedback storage is used to store allocation sites (keys) and how often
1934 : // they have been visited (values) by finding a memento behind an object. The
1935 : // storage is only alive temporarily during a GC. The invariant is that all
1936 : // pointers in this map are already fixed, i.e., they do not point to
1937 : // forwarding pointers.
1938 : PretenuringFeedbackMap global_pretenuring_feedback_;
1939 :
1940 : char trace_ring_buffer_[kTraceRingBufferSize];
1941 :
1942 : // Used as boolean.
1943 : uint8_t is_marking_flag_ = 0;
1944 :
1945 : // If it's not full then the data is from 0 to ring_buffer_end_. If it's
1946 : // full then the data is from ring_buffer_end_ to the end of the buffer and
1947 : // from 0 to ring_buffer_end_.
1948 : bool ring_buffer_full_ = false;
1949 : size_t ring_buffer_end_ = 0;
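 :
 : // What the invariant above implies for readers of the buffer, as a sketch
 : // (assumed shape of GetFromRingBuffer()):
 : //
 : //   size_t copied = 0;
 : //   if (ring_buffer_full_) {
 : //     copied = kTraceRingBufferSize - ring_buffer_end_;
 : //     MemCopy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
 : //   }
 : //   MemCopy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);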
1950 :
1951 : // Flag is set when the heap has been configured. The heap can be repeatedly
1952 : // configured through the API until it is set up.
1953 : bool configured_ = false;
1954 :
1955 : // Currently set GC flags that are respected by all GC components.
1956 : int current_gc_flags_ = Heap::kNoGCFlags;
1957 :
1958 : // Currently set GC callback flags that are used to pass information between
1959 : // the embedder and V8's GC.
1960 : GCCallbackFlags current_gc_callback_flags_;
1961 :
1962 : bool is_current_gc_forced_;
1963 :
1964 : ExternalStringTable external_string_table_;
1965 :
1966 : base::Mutex relocation_mutex_;
1967 :
1968 : int gc_callbacks_depth_ = 0;
1969 :
1970 : bool deserialization_complete_ = false;
1971 :
1972 : // The depth of HeapIterator nestings.
1973 : int heap_iterator_depth_ = 0;
1974 :
1975 : bool fast_promotion_mode_ = false;
1976 :
1977 : // Used for testing purposes.
1978 : bool force_oom_ = false;
1979 : bool delay_sweeper_tasks_for_testing_ = false;
1980 :
1981 : HeapObject pending_layout_change_object_;
1982 :
1983 : base::Mutex unprotected_memory_chunks_mutex_;
1984 : std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
1985 : bool unprotected_memory_chunks_registry_enabled_ = false;
1986 :
1987 : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1988 : // If the --gc-interval flag is set to a positive value, this
1989 : // variable holds the number of allocations that remain until the next
1990 : // failure and garbage collection.
1991 : int allocation_timeout_ = 0;
1992 : #endif // V8_ENABLE_ALLOCATION_TIMEOUT
1993 :
1994 : std::map<HeapObject, HeapObject, Object::Comparer> retainer_;
1995 : std::map<HeapObject, Root, Object::Comparer> retaining_root_;
1996 : // If an object is retained by an ephemeron, then the retaining key of the
1997 : // ephemeron is stored in this map.
1998 : std::map<HeapObject, HeapObject, Object::Comparer> ephemeron_retainer_;
1999 : // For each index in the retaining_path_targets_ array, this map
2000 : // stores the option of the corresponding target.
2001 : std::map<int, RetainingPathOption> retaining_path_target_option_;
2002 :
2003 : std::vector<HeapObjectAllocationTracker*> allocation_trackers_;
2004 :
2005 : // Classes in "heap" can be friends.
2006 : friend class AlwaysAllocateScope;
2007 : friend class ArrayBufferCollector;
2008 : friend class ConcurrentMarking;
2009 : friend class GCCallbacksScope;
2010 : friend class GCTracer;
2011 : friend class MemoryController;
2012 : friend class HeapIterator;
2013 : friend class IdleScavengeObserver;
2014 : friend class IncrementalMarking;
2015 : friend class IncrementalMarkingJob;
2016 : friend class LargeObjectSpace;
2017 : template <FixedArrayVisitationMode fixed_array_mode,
2018 : TraceRetainingPathMode retaining_path_mode, typename MarkingState>
2019 : friend class MarkingVisitor;
2020 : friend class MarkCompactCollector;
2021 : friend class MarkCompactCollectorBase;
2022 : friend class MinorMarkCompactCollector;
2023 : friend class NewLargeObjectSpace;
2024 : friend class NewSpace;
2025 : friend class ObjectStatsCollector;
2026 : friend class Page;
2027 : friend class PagedSpace;
2028 : friend class ReadOnlyRoots;
2029 : friend class Scavenger;
2030 : friend class ScavengerCollector;
2031 : friend class Space;
2032 : friend class StoreBuffer;
2033 : friend class Sweeper;
2034 : friend class heap::TestMemoryAllocatorScope;
2035 :
2036 : // The allocator interface.
2037 : friend class Factory;
2038 :
2039 : // The Isolate constructs us.
2040 : friend class Isolate;
2041 :
2042 : // Used in cctest.
2043 : friend class heap::HeapTester;
2044 :
2045 : FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
2046 : FRIEND_TEST(HeapTest, ExternalLimitDefault);
2047 : FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
2048 : DISALLOW_COPY_AND_ASSIGN(Heap);
2049 : };
2050 :
2051 :
2052 : class HeapStats {
2053 : public:
2054 : static const int kStartMarker = 0xDECADE00;
2055 : static const int kEndMarker = 0xDECADE01;
2056 :
2057 : intptr_t* start_marker; // 0
2058 : size_t* ro_space_size; // 1
2059 : size_t* ro_space_capacity; // 2
2060 : size_t* new_space_size; // 3
2061 : size_t* new_space_capacity; // 4
2062 : size_t* old_space_size; // 5
2063 : size_t* old_space_capacity; // 6
2064 : size_t* code_space_size; // 7
2065 : size_t* code_space_capacity; // 8
2066 : size_t* map_space_size; // 9
2067 : size_t* map_space_capacity; // 10
2068 : size_t* lo_space_size; // 11
2069 : size_t* code_lo_space_size; // 12
2070 : size_t* global_handle_count; // 13
2071 : size_t* weak_global_handle_count; // 14
2072 : size_t* pending_global_handle_count; // 15
2073 : size_t* near_death_global_handle_count; // 16
2074 : size_t* free_global_handle_count; // 17
2075 : size_t* memory_allocator_size; // 18
2076 : size_t* memory_allocator_capacity; // 19
2077 : size_t* malloced_memory; // 20
2078 : size_t* malloced_peak_memory; // 21
2079 : size_t* objects_per_type; // 22
2080 : size_t* size_per_type; // 23
2081 : int* os_error; // 24
2082 : char* last_few_messages; // 25
2083 : char* js_stacktrace; // 26
2084 : intptr_t* end_marker; // 27
2085 : };
2086 :
2087 :
2088 : class AlwaysAllocateScope {
2089 : public:
2090 : explicit inline AlwaysAllocateScope(Isolate* isolate);
2091 : inline ~AlwaysAllocateScope();
2092 :
2093 : private:
2094 : Heap* heap_;
2095 : };
2096 :
2097 : // The CodeSpaceMemoryModificationScope can only be used by the main thread.
2098 : class CodeSpaceMemoryModificationScope {
2099 : public:
2100 : explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
2101 : inline ~CodeSpaceMemoryModificationScope();
2102 :
2103 : private:
2104 : Heap* heap_;
2105 : };
2106 :
2107 : // The CodePageCollectionMemoryModificationScope can only be used by the main
2108 : // thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
2109 : // already active.
2110 : class CodePageCollectionMemoryModificationScope {
2111 : public:
2112 : explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
2113 : inline ~CodePageCollectionMemoryModificationScope();
2114 :
2115 : private:
2116 : Heap* heap_;
2117 : };
2118 :
2119 : // The CodePageMemoryModificationScope does not check whether transitions to
2120 : // writable and back to executable are actually allowed, i.e. whether the
2121 : // MemoryChunk was registered to be executable. Concurrent threads may use it.
2122 : class CodePageMemoryModificationScope {
2123 : public:
2124 : explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
2125 : inline ~CodePageMemoryModificationScope();
2126 :
2127 : private:
2128 : MemoryChunk* chunk_;
2129 : bool scope_active_;
2130 :
2131 : // Disallow any GCs inside this scope, as a relocation of the underlying
2132 : // object would change the {MemoryChunk} that this scope targets.
2133 : DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
2134 : };
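 :
 : // A usage sketch for this scope (illustrative):
 : //
 : //   {
 : //     CodePageMemoryModificationScope scope(chunk);
 : //     // |chunk| is writable here; patch code objects on it.
 : //   }  // Write protection is restored when the scope dies.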
2135 :
2136 : // Visitor class to verify interior pointers in spaces that do not contain
2137 : // or care about intergenerational references. All heap object pointers have to
2138 : // point into the heap to a location that has a map pointer at its first word.
2139 : // Caveat: Heap::Contains is an approximation because it can return true for
2140 : // objects in a heap space but above the allocation pointer.
2141 0 : class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
2142 : public:
2143 : explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
2144 : void VisitPointers(HeapObject host, ObjectSlot start,
2145 : ObjectSlot end) override;
2146 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
2147 : MaybeObjectSlot end) override;
2148 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
2149 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
2150 :
2151 : void VisitRootPointers(Root root, const char* description,
2152 : FullObjectSlot start, FullObjectSlot end) override;
2153 :
2154 : protected:
2155 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
2156 :
2157 : template <typename TSlot>
2158 : V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);
2159 :
2160 : virtual void VerifyPointers(HeapObject host, MaybeObjectSlot start,
2161 : MaybeObjectSlot end);
2162 :
2163 : Heap* heap_;
2164 : };
2165 :
2166 :
2167 : // Verify that all objects are Smis.
2168 0 : class VerifySmisVisitor : public RootVisitor {
2169 : public:
2170 : void VisitRootPointers(Root root, const char* description,
2171 : FullObjectSlot start, FullObjectSlot end) override;
2172 : };
2173 :
2174 : // Space iterator for iterating over all the paged spaces of the heap: Map
2175 : // space, old space, code space and optionally read only space. Returns each
2176 : // space in turn, and null when it is done.
2177 : class V8_EXPORT_PRIVATE PagedSpaces {
2178 : public:
2179 : enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };
2180 :
2181 : explicit PagedSpaces(Heap* heap, SpacesSpecifier specifier =
2182 : SpacesSpecifier::kSweepablePagedSpaces)
2183 : : heap_(heap),
2184 : counter_(specifier == SpacesSpecifier::kAllPagedSpaces ? RO_SPACE
2185 5597893 : : OLD_SPACE) {}
2186 : PagedSpace* next();
2187 :
2188 : private:
2189 : Heap* heap_;
2190 : int counter_;
2191 : };
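 :
 : // A typical iteration pattern (illustrative):
 : //
 : //   PagedSpaces spaces(heap);
 : //   for (PagedSpace* space = spaces.next(); space != nullptr;
 : //        space = spaces.next()) {
 : //     // Visit each sweepable paged space in turn.
 : //   }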
2192 :
2193 :
2194 238138 : class SpaceIterator : public Malloced {
2195 : public:
2196 : explicit SpaceIterator(Heap* heap);
2197 : virtual ~SpaceIterator();
2198 :
2199 : bool has_next();
2200 : Space* next();
2201 :
2202 : private:
2203 : Heap* heap_;
2204 : int current_space_; // from enum AllocationSpace.
2205 : };
2206 :
2207 :
2208 : // A HeapIterator provides iteration over the whole heap. It
2209 : // aggregates the specific iterators for the different spaces, as each of
2210 : // these can iterate over only one space.
2211 : //
2212 : // HeapIterator ensures there is no allocation during its lifetime
2213 : // (using an embedded DisallowHeapAllocation instance).
2214 : //
2215 : // HeapIterator can skip free list nodes (that is, de-allocated heap
2216 : // objects that still remain in the heap). As the implementation of free
2217 : // node filtering uses GC marks, it can't be used during MS/MC GC
2218 : // phases. Also, it is forbidden to interrupt iteration in this mode,
2219 : // as this will leave heap objects marked (and thus, unusable).
2220 : class HeapIterator {
2221 : public:
2222 : enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
2223 :
2224 : explicit HeapIterator(Heap* heap,
2225 : HeapObjectsFiltering filtering = kNoFiltering);
2226 : ~HeapIterator();
2227 :
2228 : HeapObject next();
2229 :
2230 : private:
2231 : HeapObject NextObject();
2232 :
2233 : DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
2234 :
2235 : Heap* heap_;
2236 : HeapObjectsFiltering filtering_;
2237 : HeapObjectsFilter* filter_;
2238 : // Space iterator for iterating all the spaces.
2239 : SpaceIterator* space_iterator_;
2240 : // Object iterator for the space currently being iterated.
2241 : std::unique_ptr<ObjectIterator> object_iterator_;
2242 : };
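 :
 : // A typical use (illustrative); next() returns a null HeapObject once the
 : // iteration is exhausted:
 : //
 : //   HeapIterator iterator(heap);
 : //   for (HeapObject obj = iterator.next(); !obj.is_null();
 : //        obj = iterator.next()) {
 : //     // Process |obj|; no allocation is allowed while iterating.
 : //   }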
2243 :
2244 : // Abstract base class for checking whether a weak object should be retained.
2245 74510 : class WeakObjectRetainer {
2246 : public:
2247 172510 : virtual ~WeakObjectRetainer() = default;
2248 :
2249 : // Return whether this object should be retained. If nullptr is returned,
2250 : // the object has no references. Otherwise return the address of the
2251 : // retained object, as in some GC situations the object has been moved.
2252 : virtual Object RetainAs(Object object) = 0;
2253 : };
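 :
 : // A minimal hypothetical retainer that keeps every object alive, returning
 : // the (possibly moved) object unchanged:
 : //
 : //   class KeepAllRetainer final : public WeakObjectRetainer {
 : //    public:
 : //     Object RetainAs(Object object) override { return object; }
 : //   };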
2254 :
2255 : // -----------------------------------------------------------------------------
2256 : // Allows observation of allocations.
2257 : class AllocationObserver {
2258 : public:
2259 : explicit AllocationObserver(intptr_t step_size)
2260 183314 : : step_size_(step_size), bytes_to_next_step_(step_size) {
2261 : DCHECK_LE(kTaggedSize, step_size);
2262 : }
2263 183239 : virtual ~AllocationObserver() = default;
2264 :
2265 : // Called each time the observed space does an allocation step. This may be
2266 : // more frequent than the step_size we are monitoring (e.g. when there are
2267 : // multiple observers, or when a page or space boundary is encountered).
2268 : void AllocationStep(int bytes_allocated, Address soon_object, size_t size);
2269 :
2270 : protected:
2271 : intptr_t step_size() const { return step_size_; }
2272 : intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
2273 :
2274 : // Pure virtual method provided by the subclasses that gets called when at
2275 : // least step_size bytes have been allocated. soon_object is the address just
2276 : // allocated (but not yet initialized); size is the size of the object as
2277 : // requested (i.e. w/o the alignment fillers). Some complexities to be aware
2278 : // of:
2279 : // 1) soon_object will be nullptr in cases where we end up observing an
2280 : // allocation that happens to be a filler space (e.g. page boundaries).
2281 : // 2) size is the requested size at the time of allocation. Right-trimming
2282 : // may change the object size dynamically.
2283 : // 3) soon_object may actually be the first object in an allocation-folding
2284 : // group. In such a case size is the size of the group rather than the
2285 : // first object.
2286 : virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
2287 :
2288 : // Subclasses can override this method to make step size dynamic.
2289 105307 : virtual intptr_t GetNextStepSize() { return step_size_; }
2290 :
2291 : intptr_t step_size_;
2292 : intptr_t bytes_to_next_step_;
2293 :
2294 : private:
2295 : friend class Space;
2296 : DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
2297 : };
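 :
 : // A hypothetical observer that fires roughly once per megabyte of
 : // allocation; the class name and step size are illustrative only:
 : //
 : //   class EveryMegabyteObserver final : public AllocationObserver {
 : //    public:
 : //     EveryMegabyteObserver() : AllocationObserver(1 * MB) {}
 : //     void Step(int bytes_allocated, Address soon_object,
 : //               size_t size) override {
 : //       // soon_object may be null for filler allocations (see point 1 above).
 : //     }
 : //   };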
2298 :
2299 : V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
2300 :
2301 : // -----------------------------------------------------------------------------
2302 : // Allows observation of heap object allocations.
2303 69117 : class HeapObjectAllocationTracker {
2304 : public:
2305 : virtual void AllocationEvent(Address addr, int size) = 0;
2306 35039 : virtual void MoveEvent(Address from, Address to, int size) {}
2307 2824 : virtual void UpdateObjectSizeEvent(Address addr, int size) {}
2308 69103 : virtual ~HeapObjectAllocationTracker() = default;
2309 : };
2310 :
2311 : } // namespace internal
2312 : } // namespace v8
2313 :
2314 : #endif // V8_HEAP_HEAP_H_
|