Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_HEAP_H_
6 : #define V8_HEAP_HEAP_H_
7 :
8 : #include <cmath>
9 : #include <map>
10 : #include <unordered_map>
11 : #include <unordered_set>
12 : #include <vector>
13 :
14 : // Clients of this interface shouldn't depend on lots of heap internals.
15 : // Do not include anything from src/heap here!
16 : #include "include/v8-internal.h"
17 : #include "include/v8.h"
18 : #include "src/accessors.h"
19 : #include "src/allocation.h"
20 : #include "src/assert-scope.h"
21 : #include "src/base/atomic-utils.h"
22 : #include "src/globals.h"
23 : #include "src/heap-symbols.h"
24 : #include "src/objects.h"
25 : #include "src/objects/allocation-site.h"
26 : #include "src/objects/fixed-array.h"
27 : #include "src/objects/heap-object.h"
28 : #include "src/objects/smi.h"
29 : #include "src/objects/string-table.h"
30 : #include "src/roots.h"
31 : #include "src/visitors.h"
32 : #include "testing/gtest/include/gtest/gtest_prod.h"
33 :
34 : namespace v8 {
35 :
36 : namespace debug {
37 : typedef void (*OutOfMemoryCallback)(void* data);
38 : } // namespace debug
39 :
40 : namespace internal {
41 :
42 : namespace heap {
43 : class HeapTester;
44 : class TestMemoryAllocatorScope;
45 : } // namespace heap
46 :
47 : class ObjectBoilerplateDescription;
48 : class BytecodeArray;
49 : class CodeDataContainer;
50 : class DeoptimizationData;
51 : class HandlerTable;
52 : class IncrementalMarking;
53 : class JSArrayBuffer;
54 : class ExternalString;
55 : using v8::MemoryPressureLevel;
56 :
57 : class AllocationObserver;
58 : class ArrayBufferCollector;
59 : class ArrayBufferTracker;
60 : class CodeLargeObjectSpace;
61 : class ConcurrentMarking;
62 : class GCIdleTimeHandler;
63 : class GCIdleTimeHeapState;
64 : class GCTracer;
65 : class HeapController;
66 : class HeapObjectAllocationTracker;
67 : class HeapObjectsFilter;
68 : class HeapStats;
69 : class HistogramTimer;
70 : class Isolate;
71 : class JSFinalizationGroup;
72 : class LocalEmbedderHeapTracer;
73 : class MemoryAllocator;
74 : class MemoryReducer;
75 : class MinorMarkCompactCollector;
76 : class ObjectIterator;
77 : class ObjectStats;
78 : class Page;
79 : class PagedSpace;
80 : class ReadOnlyHeap;
81 : class RootVisitor;
82 : class ScavengeJob;
83 : class Scavenger;
84 : class ScavengerCollector;
85 : class Space;
86 : class StoreBuffer;
87 : class StressScavengeObserver;
88 : class TimedHistogram;
89 : class TracePossibleWrapperReporter;
90 : class WeakObjectRetainer;
91 :
92 : enum ArrayStorageAllocationMode {
93 : DONT_INITIALIZE_ARRAY_ELEMENTS,
94 : INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
95 : };
96 :
97 : enum class ClearRecordedSlots { kYes, kNo };
98 :
99 : enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
100 :
101 : enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
102 :
103 : enum class FixedArrayVisitationMode { kRegular, kIncremental };
104 :
105 : enum class TraceRetainingPathMode { kEnabled, kDisabled };
106 :
107 : enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
108 :
109 : enum class GarbageCollectionReason {
110 : kUnknown = 0,
111 : kAllocationFailure = 1,
112 : kAllocationLimit = 2,
113 : kContextDisposal = 3,
114 : kCountersExtension = 4,
115 : kDebugger = 5,
116 : kDeserializer = 6,
117 : kExternalMemoryPressure = 7,
118 : kFinalizeMarkingViaStackGuard = 8,
119 : kFinalizeMarkingViaTask = 9,
120 : kFullHashtable = 10,
121 : kHeapProfiler = 11,
122 : kIdleTask = 12,
123 : kLastResort = 13,
124 : kLowMemoryNotification = 14,
125 : kMakeHeapIterable = 15,
126 : kMemoryPressure = 16,
127 : kMemoryReducer = 17,
128 : kRuntime = 18,
129 : kSamplingProfiler = 19,
130 : kSnapshotCreator = 20,
131 : kTesting = 21,
132 : kExternalFinalize = 22
133 : // If you add new items here, then update the incremental_marking_reason,
134 : // mark_compact_reason, and scavenge_reason counters in counters.h.
135 : // Also update src/tools/metrics/histograms/histograms.xml in chromium.
136 : };
137 :
138 : enum class YoungGenerationHandling {
139 : kRegularScavenge = 0,
140 : kFastPromotionDuringScavenge = 1,
141 : // Histogram::InspectConstructionArguments in chromium requires us to have at
142 : // least three buckets.
143 : kUnusedBucket = 2,
144 : // If you add new items here, then update the young_generation_handling in
145 : // counters.h.
146 : // Also update src/tools/metrics/histograms/histograms.xml in chromium.
147 : };
148 :
149 : enum class GCIdleTimeAction : uint8_t;
150 :
151 : class AllocationResult {
152 : public:
153 0 : static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
154 0 : return AllocationResult(space);
155 : }
156 :
157 : // Implicit constructor from Object.
158 9576 : AllocationResult(Object object) // NOLINT
159 9621 : : object_(object) {
160 : // AllocationResults can't return Smis, which are used to represent
161 : // failure and the space to retry in.
162 506968470 : CHECK(!object->IsSmi());
163 9576 : }
164 :
165 0 : AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
166 :
167 39312 : inline bool IsRetry() { return object_->IsSmi(); }
168 : inline HeapObject ToObjectChecked();
169 : inline AllocationSpace RetrySpace();
170 :
171 : template <typename T>
172 17490632 : bool To(T* obj) {
173 1032631038 : if (IsRetry()) return false;
174 69601309 : *obj = T::cast(object_);
175 17490718 : return true;
176 : }
177 :
178 : private:
179 0 : explicit AllocationResult(AllocationSpace space)
180 13775 : : object_(Smi::FromInt(static_cast<int>(space))) {}
181 :
182 : Object object_;
183 : };
184 :
185 : STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
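 : // A minimal sketch of the calling convention (hypothetical call site;
 : // AllocateRaw stands in for any allocator returning an AllocationResult):
 : //
 : //   AllocationResult result = AllocateRaw(size, AllocationType::kYoung);
 : //   HeapObject object;
 : //   if (!result.To(&object)) {
 : //     // Failure: the result wraps a Smi encoding the space to retry in.
 : //     AllocationSpace space = result.RetrySpace();
 : //     // ... collect garbage for |space| and retry the allocation ...
 : //   }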
186 :
187 : #ifdef DEBUG
188 : struct CommentStatistic {
189 : const char* comment;
190 : int size;
191 : int count;
192 : void Clear() {
193 : comment = nullptr;
194 : size = 0;
195 : count = 0;
196 : }
197 : // Must be small, since an iteration is used for lookup.
198 : static const int kMaxComments = 64;
199 : };
200 : #endif
201 :
202 430632 : class Heap {
203 : public:
204 : enum FindMementoMode { kForRuntime, kForGC };
205 :
206 : enum HeapState {
207 : NOT_IN_GC,
208 : SCAVENGE,
209 : MARK_COMPACT,
210 : MINOR_MARK_COMPACT,
211 : TEAR_DOWN
212 : };
213 :
214 : using PretenuringFeedbackMap =
215 : std::unordered_map<AllocationSite, size_t, Object::Hasher>;
216 :
217 : // Taking this mutex prevents the GC from entering a phase that relocates
218 : // object references.
219 94928 : base::Mutex* relocation_mutex() { return &relocation_mutex_; }
220 :
221 : // Support for partial snapshots. After calling this we have a linear
222 : // space to write objects in each space.
223 : struct Chunk {
224 : uint32_t size;
225 : Address start;
226 : Address end;
227 : };
228 : typedef std::vector<Chunk> Reservation;
229 :
230 : static const int kInitalOldGenerationLimitFactor = 2;
231 :
232 : #if V8_OS_ANDROID
233 : // Don't apply pointer multiplier on Android since it has no swap space and
234 : // should instead adapt its heap size based on available physical memory.
235 : static const int kPointerMultiplier = 1;
236 : #else
237 : // TODO(ishell): kSystemPointerMultiplier?
238 : static const int kPointerMultiplier = i::kSystemPointerSize / 4;
239 : #endif
240 :
241 : // Semi-space size needs to be a multiple of page size.
242 : static const size_t kMinSemiSpaceSizeInKB = 512 * kPointerMultiplier;
243 : static const size_t kMaxSemiSpaceSizeInKB = 8192 * kPointerMultiplier;
244 :
245 : STATIC_ASSERT(kMinSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
246 : STATIC_ASSERT(kMaxSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
247 :
248 : static const int kTraceRingBufferSize = 512;
249 : static const int kStacktraceBufferSize = 512;
250 :
251 : static const int kNoGCFlags = 0;
252 : static const int kReduceMemoryFootprintMask = 1;
253 :
254 : // The minimum size of a HeapObject on the heap.
255 : static const int kMinObjectSizeInTaggedWords = 2;
256 :
257 : static const int kMinPromotedPercentForFastPromotionMode = 90;
258 :
259 : STATIC_ASSERT(static_cast<int>(RootIndex::kUndefinedValue) ==
260 : Internals::kUndefinedValueRootIndex);
261 : STATIC_ASSERT(static_cast<int>(RootIndex::kTheHoleValue) ==
262 : Internals::kTheHoleValueRootIndex);
263 : STATIC_ASSERT(static_cast<int>(RootIndex::kNullValue) ==
264 : Internals::kNullValueRootIndex);
265 : STATIC_ASSERT(static_cast<int>(RootIndex::kTrueValue) ==
266 : Internals::kTrueValueRootIndex);
267 : STATIC_ASSERT(static_cast<int>(RootIndex::kFalseValue) ==
268 : Internals::kFalseValueRootIndex);
269 : STATIC_ASSERT(static_cast<int>(RootIndex::kempty_string) ==
270 : Internals::kEmptyStringRootIndex);
271 :
272 : // Calculates the maximum amount of filler that could be required by the
273 : // given alignment.
274 : static int GetMaximumFillToAlign(AllocationAlignment alignment);
275 : // Calculates the actual amount of filler required for a given address at the
276 : // given alignment.
277 : static int GetFillToAlign(Address address, AllocationAlignment alignment);
278 :
279 : void FatalProcessOutOfMemory(const char* location);
280 :
281 : // Checks whether the space is valid.
282 : static bool IsValidAllocationSpace(AllocationSpace space);
283 :
284 : // Zapping is needed for heap verification and is always done in debug builds.
285 : static inline bool ShouldZapGarbage() {
286 : #ifdef DEBUG
287 : return true;
288 : #else
289 : #ifdef VERIFY_HEAP
290 : return FLAG_verify_heap;
291 : #else
292 : return false;
293 : #endif
294 : #endif
295 : }
296 :
297 : static uintptr_t ZapValue() {
298 0 : return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
299 : }
300 :
301 : static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
302 474620 : return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
303 : }
304 :
305 : static inline GarbageCollector YoungGenerationCollector() {
306 : #if ENABLE_MINOR_MC
307 20973 : return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
308 : #else
309 : return SCAVENGER;
310 : #endif // ENABLE_MINOR_MC
311 : }
312 :
313 : static inline const char* CollectorName(GarbageCollector collector) {
314 0 : switch (collector) {
315 : case SCAVENGER:
316 : return "Scavenger";
317 : case MARK_COMPACTOR:
318 : return "Mark-Compact";
319 : case MINOR_MARK_COMPACTOR:
320 : return "Minor Mark-Compact";
321 : }
322 : return "Unknown collector";
323 : }
324 :
325 : // Copies a block of memory from src to dst. The block size should be a
326 : // multiple of the pointer size.
327 : static inline void CopyBlock(Address dst, Address src, int byte_size);
328 :
329 : V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
330 : V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
331 : Address slot,
332 : HeapObject value);
333 : V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
334 : Heap* heap, FixedArray array, int offset, int length);
335 : V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
336 : Code host, RelocInfo* rinfo, HeapObject value);
337 : V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object,
338 : Address slot,
339 : HeapObject value);
340 : V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
341 : Heap* heap, HeapObject object);
342 : V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
343 : RelocInfo* rinfo,
344 : HeapObject value);
345 : V8_EXPORT_PRIVATE static void MarkingBarrierForDescriptorArraySlow(
346 : Heap* heap, HeapObject host, HeapObject descriptor_array,
347 : int number_of_own_descriptors);
348 : V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object);
349 :
350 : // Notifies the heap that it is ok to start marking or other activities
351 : // that should not happen during deserialization.
352 : void NotifyDeserializationComplete();
353 :
354 : void NotifyBootstrapComplete();
355 :
356 : void NotifyOldGenerationExpansion();
357 :
358 : inline Address* NewSpaceAllocationTopAddress();
359 : inline Address* NewSpaceAllocationLimitAddress();
360 : inline Address* OldSpaceAllocationTopAddress();
361 : inline Address* OldSpaceAllocationLimitAddress();
362 :
363 : // Moves len elements within a given array from src_index to
364 : // dst_index.
365 : void MoveElements(FixedArray array, int dst_index, int src_index, int len,
366 : WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
367 :
368 : // Copy len elements from src_index of src array to dst_index of dst array.
369 : void CopyElements(FixedArray dst, FixedArray src, int dst_index,
370 : int src_index, int len, WriteBarrierMode mode);
371 :
372 : // Initialize a filler object to keep the ability to iterate over the heap
373 : // when introducing gaps within pages. If slots could have been recorded in
374 : // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
375 : // pass ClearRecordedSlots::kNo. If the memory after the object header of
376 : // the filler should be cleared, pass in kClearFreedMemory. The default is
377 : // kDontClearFreedMemory.
378 : V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
379 : Address addr, int size, ClearRecordedSlots clear_slots_mode,
380 : ClearFreedMemoryMode clear_memory_mode =
381 : ClearFreedMemoryMode::kDontClearFreedMemory);
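 : // A sketch of a typical call, e.g. when freeing a gap inside an old-space
 : // page where slots may have been recorded in the freed area (|addr| and
 : // |size| are hypothetical here):
 : //
 : //   heap->CreateFillerObjectAt(addr, size, ClearRecordedSlots::kYes,
 : //                              ClearFreedMemoryMode::kClearFreedMemory);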
382 :
383 : template <typename T>
384 : void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
385 :
386 : bool CanMoveObjectStart(HeapObject object);
387 :
388 : bool IsImmovable(HeapObject object);
389 :
390 : static bool IsLargeObject(HeapObject object);
391 :
392 : // Trim the given array from the left. Note that this relocates the object
393 : // start and hence is only valid if there is only a single reference to it.
394 : FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
395 :
396 : // Trim the given array from the right.
397 : void RightTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
398 : void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);
399 :
400 : // Converts the given boolean condition to a JavaScript boolean value.
401 : inline Oddball ToBoolean(bool condition);
402 :
403 : // Notify the heap that a context has been disposed.
404 : int NotifyContextDisposed(bool dependant_context);
405 :
406 56 : void set_native_contexts_list(Object object) {
407 382436 : native_contexts_list_ = object;
408 56 : }
409 : Object native_contexts_list() const { return native_contexts_list_; }
410 :
411 56 : void set_allocation_sites_list(Object object) {
412 366602 : allocation_sites_list_ = object;
413 56 : }
414 : Object allocation_sites_list() { return allocation_sites_list_; }
415 :
416 : // Used in CreateAllocationSiteStub and the (de)serializer.
417 : Address allocation_sites_list_address() {
418 61926 : return reinterpret_cast<Address>(&allocation_sites_list_);
419 : }
420 :
421 : // Traverses all the allocation sites [nested_site and weak_next] in the
422 : // list and, for each, calls the visitor.
423 : void ForeachAllocationSite(
424 : Object list, const std::function<void(AllocationSite)>& visitor);
425 :
426 : // Number of mark-sweeps.
427 20 : int ms_count() const { return ms_count_; }
428 :
429 : // Checks whether the given object is allowed to be migrated from its
430 : // current space into the given destination space. Used for debugging.
431 : bool AllowedToBeMigrated(HeapObject object, AllocationSpace dest);
432 :
433 : void CheckHandleCount();
434 :
435 : // Number of "runtime allocations" done so far.
436 : uint32_t allocations_count() { return allocations_count_; }
437 :
438 : // Print short heap statistics.
439 : void PrintShortHeapStatistics();
440 :
441 112 : bool write_protect_code_memory() const { return write_protect_code_memory_; }
442 :
443 : uintptr_t code_space_memory_modification_scope_depth() {
444 : return code_space_memory_modification_scope_depth_;
445 : }
446 :
447 56 : void increment_code_space_memory_modification_scope_depth() {
448 285798 : code_space_memory_modification_scope_depth_++;
449 56 : }
450 :
451 56 : void decrement_code_space_memory_modification_scope_depth() {
452 285798 : code_space_memory_modification_scope_depth_--;
453 56 : }
454 :
455 : void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
456 : void UnprotectAndRegisterMemoryChunk(HeapObject object);
457 : void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
458 : V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
459 :
460 : void EnableUnprotectedMemoryChunksRegistry() {
461 1773829 : unprotected_memory_chunks_registry_enabled_ = true;
462 : }
463 :
464 : void DisableUnprotectedMemoryChunksRegistry() {
465 1773834 : unprotected_memory_chunks_registry_enabled_ = false;
466 : }
467 :
468 : bool unprotected_memory_chunks_registry_enabled() {
469 : return unprotected_memory_chunks_registry_enabled_;
470 : }
471 :
472 : inline HeapState gc_state() { return gc_state_; }
473 : void SetGCState(HeapState state);
474 : bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }
475 :
476 : inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
477 :
478 : // If an object has an AllocationMemento trailing it, return it, otherwise
479 : // return a null AllocationMemento.
480 : template <FindMementoMode mode>
481 : inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
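 : // A sketch of the intended use from the runtime (hypothetical call site;
 : // is_null() is the usual null-object test):
 : //
 : //   AllocationMemento memento =
 : //       heap->FindAllocationMemento<Heap::kForRuntime>(object->map(), object);
 : //   if (!memento.is_null()) {
 : //     // ... update pretenuring feedback for memento->GetAllocationSite() ...
 : //   }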
482 :
483 : // Returns false if not able to reserve.
484 : bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
485 :
486 : //
487 : // Support for the API.
488 : //
489 :
490 : void CreateApiObjects();
491 :
492 : // Implements the corresponding V8 API function.
493 : bool IdleNotification(double deadline_in_seconds);
494 : bool IdleNotification(int idle_time_in_ms);
495 :
496 : void MemoryPressureNotification(MemoryPressureLevel level,
497 : bool is_isolate_locked);
498 : void CheckMemoryPressure();
499 :
500 : void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
501 : void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
502 : size_t heap_limit);
503 : void AutomaticallyRestoreInitialHeapLimit(double threshold_percent);
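 : // A near-heap-limit callback may raise the limit to stave off an imminent
 : // OOM. A minimal sketch (v8::NearHeapLimitCallback is declared in
 : // include/v8.h; the doubling policy is only an example):
 : //
 : //   size_t NearHeapLimit(void* data, size_t current_heap_limit,
 : //                        size_t initial_heap_limit) {
 : //     return current_heap_limit * 2;  // Grant extra headroom once.
 : //   }
 : //   heap->AddNearHeapLimitCallback(NearHeapLimit, nullptr);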
504 :
505 : double MonotonicallyIncreasingTimeInMs();
506 :
507 : void RecordStats(HeapStats* stats, bool take_snapshot = false);
508 :
509 : // Check new space expansion criteria and expand semispaces if they were hit.
510 : void CheckNewSpaceExpansionCriteria();
511 :
512 : void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
513 :
514 : // An object should be promoted if the object has survived a
515 : // scavenge operation.
516 : inline bool ShouldBePromoted(Address old_address);
517 :
518 : void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
519 :
520 : inline int NextScriptId();
521 : inline int NextDebuggingId();
522 : inline int GetNextTemplateSerialNumber();
523 :
524 : void SetSerializedObjects(FixedArray objects);
525 : void SetSerializedGlobalProxySizes(FixedArray sizes);
526 :
527 : // For post mortem debugging.
528 : void RememberUnmappedPage(Address page, bool compacted);
529 :
530 979806 : int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
531 :
532 : V8_INLINE int64_t external_memory();
533 : V8_INLINE void update_external_memory(int64_t delta);
534 : V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed);
535 : V8_INLINE void account_external_memory_concurrently_freed();
536 :
537 : size_t backing_store_bytes() const { return backing_store_bytes_; }
538 :
539 : void CompactWeakArrayLists(AllocationType allocation);
540 :
541 : void AddRetainedMap(Handle<Map> map);
542 :
543 : // This event is triggered after successful allocation of a new object made
544 : // by the runtime. Allocations of target space for object evacuation do not
545 : // trigger the event. In order to track ALL allocations, one must turn off
546 : // FLAG_inline_new.
547 : inline void OnAllocationEvent(HeapObject object, int size_in_bytes);
548 :
549 : // This event is triggered after an object is moved to a new place.
550 : void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);
551 :
552 : inline bool CanAllocateInReadOnlySpace();
553 : bool deserialization_complete() const { return deserialization_complete_; }
554 :
555 : bool HasLowAllocationRate();
556 : bool HasHighFragmentation();
557 : bool HasHighFragmentation(size_t used, size_t committed);
558 :
559 : void ActivateMemoryReducerIfNeeded();
560 :
561 : bool ShouldOptimizeForMemoryUsage();
562 :
563 : bool HighMemoryPressure() {
564 : return memory_pressure_level_ != MemoryPressureLevel::kNone;
565 : }
566 :
567 5 : void RestoreHeapLimit(size_t heap_limit) {
568 : // Do not set the limit lower than the live size + some slack.
569 5 : size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
570 : max_old_generation_size_ =
571 10 : Min(max_old_generation_size_, Max(heap_limit, min_limit));
572 5 : }
573 :
574 : // ===========================================================================
575 : // Initialization. ===========================================================
576 : // ===========================================================================
577 :
578 : // Configure heap sizes
579 : // max_semi_space_size_in_kb: maximum semi-space size in KB
580 : // max_old_generation_size_in_mb: maximum old generation size in MB
581 : // code_range_size_in_mb: code range size in MB
582 : void ConfigureHeap(size_t max_semi_space_size_in_kb,
583 : size_t max_old_generation_size_in_mb,
584 : size_t code_range_size_in_mb);
585 : void ConfigureHeapDefault();
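 : // For example, configuring a 2 MB semi-space, a 256 MB old generation and
 : // a 128 MB code range (illustrative numbers only) would look like:
 : //
 : //   heap->ConfigureHeap(2 * 1024 /* KB */, 256 /* MB */, 128 /* MB */);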
586 :
587 : // Prepares the heap, setting up for deserialization.
588 : void SetUp();
589 :
590 : // Sets read-only heap and space.
591 : void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
592 :
593 : // Sets up the heap memory without creating any objects.
594 : void SetUpSpaces();
595 :
596 : // (Re-)Initialize hash seed from flag or RNG.
597 : void InitializeHashSeed();
598 :
599 : // Bootstraps the object heap with the core set of objects required to run.
600 : // Returns whether it succeeded.
601 : bool CreateHeapObjects();
602 :
603 : // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
604 : void CreateObjectStats();
605 :
606 : // Sets the TearDown state, so no new GC tasks get posted.
607 : void StartTearDown();
608 :
609 : // Destroys all memory allocated by the heap.
610 : void TearDown();
611 :
612 : // Returns whether SetUp has been called.
613 : bool HasBeenSetUp();
614 :
615 : // ===========================================================================
616 : // Getters for spaces. =======================================================
617 : // ===========================================================================
618 :
619 : inline Address NewSpaceTop();
620 :
621 : NewSpace* new_space() { return new_space_; }
622 : OldSpace* old_space() { return old_space_; }
623 112 : CodeSpace* code_space() { return code_space_; }
624 : MapSpace* map_space() { return map_space_; }
625 : LargeObjectSpace* lo_space() { return lo_space_; }
626 112 : CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
627 : NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
628 : ReadOnlySpace* read_only_space() { return read_only_space_; }
629 :
630 : inline PagedSpace* paged_space(int idx);
631 : inline Space* space(int idx);
632 :
633 : // Returns name of the space.
634 : const char* GetSpaceName(int idx);
635 :
636 : // ===========================================================================
637 : // Getters to other components. ==============================================
638 : // ===========================================================================
639 :
640 : ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
641 :
642 : GCTracer* tracer() { return tracer_.get(); }
643 :
644 84840 : MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
645 :
646 : inline Isolate* isolate();
647 :
648 : MarkCompactCollector* mark_compact_collector() {
649 : return mark_compact_collector_.get();
650 : }
651 :
652 : MinorMarkCompactCollector* minor_mark_compact_collector() {
653 : return minor_mark_compact_collector_;
654 : }
655 :
656 : ArrayBufferCollector* array_buffer_collector() {
657 : return array_buffer_collector_.get();
658 : }
659 :
660 : // ===========================================================================
661 : // Root set access. ==========================================================
662 : // ===========================================================================
663 :
664 : // Shortcut to the roots table stored in the Isolate.
665 : V8_INLINE RootsTable& roots_table();
666 :
667 : // Heap root getters.
668 : #define ROOT_ACCESSOR(type, name, CamelName) inline type name();
669 : MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
670 : #undef ROOT_ACCESSOR
671 :
672 : V8_INLINE void SetRootMaterializedObjects(FixedArray objects);
673 : V8_INLINE void SetRootScriptList(Object value);
674 : V8_INLINE void SetRootStringTable(StringTable value);
675 : V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value);
676 : V8_INLINE void SetMessageListeners(TemplateList value);
677 : V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);
678 :
679 : // Set the stack limit in the roots table. Some architectures generate
680 : // code that looks here, because it is faster than loading from the static
681 : // jslimit_/real_jslimit_ variable in the StackGuard.
682 : void SetStackLimits();
683 :
684 : // The stack limit is thread-dependent. To be able to reproduce the same
685 : // snapshot blob, we need to reset it before serializing.
686 : void ClearStackLimits();
687 :
688 : void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
689 : void UnregisterStrongRoots(FullObjectSlot start);
690 :
691 : void SetBuiltinsConstantsTable(FixedArray cache);
692 :
693 : // A full copy of the interpreter entry trampoline, used as a template to
694 : // create copies of the builtin at runtime. The copies are used to create
695 : // better profiling information for ticks in bytecode execution. Note that
696 : // this is always a copy of the full builtin, i.e. not the off-heap
697 : // trampoline.
698 : // See also: FLAG_interpreted_frames_native_stack.
699 : void SetInterpreterEntryTrampolineForProfiling(Code code);
700 :
701 : // Add finalization_group into the dirty_js_finalization_groups list.
702 : void AddDirtyJSFinalizationGroup(
703 : JSFinalizationGroup finalization_group,
704 : std::function<void(HeapObject object, ObjectSlot slot, Object target)>
705 : gc_notify_updated_slot);
706 :
707 : void AddKeepDuringJobTarget(Handle<JSReceiver> target);
708 : void ClearKeepDuringJobSet();
709 :
710 : // ===========================================================================
711 : // Inline allocation. ========================================================
712 : // ===========================================================================
713 :
714 : // Indicates whether inline bump-pointer allocation has been disabled.
715 : bool inline_allocation_disabled() { return inline_allocation_disabled_; }
716 :
717 : // Switch whether inline bump-pointer allocation should be used.
718 : void EnableInlineAllocation();
719 : void DisableInlineAllocation();
720 :
721 : // ===========================================================================
722 : // Methods triggering GCs. ===================================================
723 : // ===========================================================================
724 :
725 : // Performs a garbage collection operation.
726 : // Returns whether there is a chance that another major GC could
727 : // collect more garbage.
728 : V8_EXPORT_PRIVATE bool CollectGarbage(
729 : AllocationSpace space, GarbageCollectionReason gc_reason,
730 : const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
731 :
732 : // Performs a full garbage collection.
733 : V8_EXPORT_PRIVATE void CollectAllGarbage(
734 : int flags, GarbageCollectionReason gc_reason,
735 : const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
736 :
737 : // Last hope GC, should try to squeeze as much as possible.
738 : void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);
739 :
740 : // Precise garbage collection that potentially finalizes already running
741 : // incremental marking before performing an atomic garbage collection.
742 : // Only use if absolutely necessary or in tests to avoid floating garbage!
743 : void PreciseCollectAllGarbage(
744 : int flags, GarbageCollectionReason gc_reason,
745 : const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
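 : // A typical test-only invocation, sketched with the constants declared in
 : // this class and in GarbageCollectionReason above:
 : //
 : //   heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
 : //                           GarbageCollectionReason::kTesting);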
746 :
747 : // Reports an external memory pressure event, which either performs a major
748 : // GC or completes incremental marking in order to free external resources.
749 : void ReportExternalMemoryPressure();
750 :
751 : typedef v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback
752 : GetExternallyAllocatedMemoryInBytesCallback;
753 :
754 : void SetGetExternallyAllocatedMemoryInBytesCallback(
755 : GetExternallyAllocatedMemoryInBytesCallback callback) {
756 61534 : external_memory_callback_ = callback;
757 : }
758 :
759 : // Invoked when GC was requested via the stack guard.
760 : void HandleGCRequest();
761 :
762 : // ===========================================================================
763 : // Builtins. =================================================================
764 : // ===========================================================================
765 :
766 : Code builtin(int index);
767 : Address builtin_address(int index);
768 : void set_builtin(int index, Code builtin);
769 :
770 : // ===========================================================================
771 : // Iterators. ================================================================
772 : // ===========================================================================
773 :
774 : // None of these methods iterate over the read-only roots. To do this use
775 : // ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for
776 : // garbage collection and is usually only performed as part of
777 : // (de)serialization or heap verification.
778 :
779 : // Iterates over the strong roots and the weak roots.
780 : void IterateRoots(RootVisitor* v, VisitMode mode);
781 : // Iterates over the strong roots.
782 : void IterateStrongRoots(RootVisitor* v, VisitMode mode);
783 : // Iterates over entries in the smi roots list. Only interesting to the
784 : // serializer/deserializer, since GC does not care about smis.
785 : void IterateSmiRoots(RootVisitor* v);
786 : // Iterates over weak string tables.
787 : void IterateWeakRoots(RootVisitor* v, VisitMode mode);
788 : // Iterates over weak global handles.
789 : void IterateWeakGlobalHandles(RootVisitor* v);
790 : // Iterates over builtins.
791 : void IterateBuiltins(RootVisitor* v);
792 :
793 : // ===========================================================================
794 : // Store buffer API. =========================================================
795 : // ===========================================================================
796 :
797 : // Used to query incremental marking status in generated code.
798 : Address* IsMarkingFlagAddress() {
799 61590 : return reinterpret_cast<Address*>(&is_marking_flag_);
800 : }
801 :
802 155278 : void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
803 :
804 : Address* store_buffer_top_address();
805 : static intptr_t store_buffer_mask_constant();
806 : static Address store_buffer_overflow_function_address();
807 :
808 : void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
809 : void ClearRecordedSlotRange(Address start, Address end);
810 :
811 : #ifdef DEBUG
812 : void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
813 : #endif
814 :
815 : // ===========================================================================
816 : // Incremental marking API. ==================================================
817 : // ===========================================================================
818 :
819 : int GCFlagsForIncrementalMarking() {
820 1641532 : return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
821 1641537 : : kNoGCFlags;
822 : }
823 :
824 : // Start incremental marking and ensure that idle time handler can perform
825 : // incremental steps.
826 : void StartIdleIncrementalMarking(
827 : GarbageCollectionReason gc_reason,
828 : GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
829 :
830 : // Starts incremental marking assuming incremental marking is currently
831 : // stopped.
832 : void StartIncrementalMarking(
833 : int gc_flags, GarbageCollectionReason gc_reason,
834 : GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
835 :
836 : void StartIncrementalMarkingIfAllocationLimitIsReached(
837 : int gc_flags,
838 : GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
839 :
840 : void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
841 : // Synchronously finalizes incremental marking.
842 : void FinalizeIncrementalMarkingAtomically(GarbageCollectionReason gc_reason);
843 :
844 : void RegisterDeserializedObjectsForBlackAllocation(
845 : Reservation* reservations, const std::vector<HeapObject>& large_objects,
846 : const std::vector<Address>& maps);
847 :
848 : IncrementalMarking* incremental_marking() {
849 : return incremental_marking_.get();
850 : }
851 :
852 : // ===========================================================================
853 : // Concurrent marking API. ===================================================
854 : // ===========================================================================
855 :
856 : ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); }
857 :
858 : // The runtime uses this function to notify the GC of potentially unsafe
859 : // object layout changes that require special synchronization with the
860 : // concurrent marker. The old size is the object's size before the change.
861 : void NotifyObjectLayoutChange(HeapObject object, int old_size,
862 : const DisallowHeapAllocation&);
863 :
864 : #ifdef VERIFY_HEAP
865 : // This function checks that either
866 : // - the map transition is safe,
867 : // - or it was communicated to GC using NotifyObjectLayoutChange.
868 : void VerifyObjectLayoutChange(HeapObject object, Map new_map);
869 : #endif
870 :
871 : // ===========================================================================
872 : // Deoptimization support API. ===============================================
873 : // ===========================================================================
874 :
875 : // Setters for code offsets of well-known deoptimization targets.
876 : void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
877 : void SetConstructStubCreateDeoptPCOffset(int pc_offset);
878 : void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
879 : void SetInterpreterEntryReturnPCOffset(int pc_offset);
880 :
881 : // Invalidates references in the given {code} object that are referenced
882 : // transitively from the deoptimization data. Mutates write-protected code.
883 : void InvalidateCodeDeoptimizationData(Code code);
884 :
885 : void DeoptMarkedAllocationSites();
886 :
887 : bool DeoptMaybeTenuredAllocationSites();
888 :
889 : // ===========================================================================
890 : // Embedder heap tracer support. =============================================
891 : // ===========================================================================
892 :
893 : LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
894 : return local_embedder_heap_tracer_.get();
895 : }
896 :
897 : void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
898 : EmbedderHeapTracer* GetEmbedderHeapTracer() const;
899 :
900 : void RegisterExternallyReferencedObject(Address* location);
901 : void SetEmbedderStackStateForNextFinalizaton(
902 : EmbedderHeapTracer::EmbedderStackState stack_state);
903 :
904 : // ===========================================================================
905 : // External string table API. ================================================
906 : // ===========================================================================
907 :
908 : // Registers an external string.
909 : inline void RegisterExternalString(String string);
910 :
911 : // Called when a string's resource is changed. The old and new payload
912 : // sizes are passed as arguments.
913 : void UpdateExternalString(String string, size_t old_payload,
914 : size_t new_payload);
915 :
916 : // Finalizes an external string by deleting the associated external
917 : // data and clearing the resource pointer.
918 : inline void FinalizeExternalString(String string);
919 :
920 : static String UpdateYoungReferenceInExternalStringTableEntry(
921 : Heap* heap, FullObjectSlot pointer);
922 :
923 : // ===========================================================================
924 : // Methods checking/returning the space of a given object/address. ===========
925 : // ===========================================================================
926 :
927 : // Returns whether the object resides in new space.
928 : static inline bool InYoungGeneration(Object object);
929 : static inline bool InYoungGeneration(MaybeObject object);
930 : static inline bool InYoungGeneration(HeapObject heap_object);
931 : static inline bool InFromPage(Object object);
932 : static inline bool InFromPage(MaybeObject object);
933 : static inline bool InFromPage(HeapObject heap_object);
934 : static inline bool InToPage(Object object);
935 : static inline bool InToPage(MaybeObject object);
936 : static inline bool InToPage(HeapObject heap_object);
937 :
938 : // Returns whether the object resides in old space.
939 : inline bool InOldSpace(Object object);
940 :
941 : // Checks whether an address/object is in the heap (including auxiliary
942 : // area and unused area).
943 : bool Contains(HeapObject value);
944 :
945 : // Checks whether an address/object is in a space.
946 : // Currently used by tests, serialization and heap verification only.
947 : bool InSpace(HeapObject value, AllocationSpace space);
948 :
949 : // Slow methods that can be used for verification as they can also be used
950 : // with off-heap Addresses.
951 : bool InSpaceSlow(Address addr, AllocationSpace space);
952 :
953 : static inline Heap* FromWritableHeapObject(const HeapObject obj);
954 :
955 : // ===========================================================================
956 : // Object statistics tracking. ===============================================
957 : // ===========================================================================
958 :
959 : // Returns the number of buckets used by object statistics tracking during a
960 : // major GC. Note that the following methods fail gracefully when the bounds
961 : // are exceeded though.
962 : size_t NumberOfTrackedHeapObjectTypes();
963 :
964 : // Returns object statistics about count and size at the last major GC.
965 : // Objects are being grouped into buckets that roughly resemble existing
966 : // instance types.
967 : size_t ObjectCountAtLastGC(size_t index);
968 : size_t ObjectSizeAtLastGC(size_t index);
969 :
970 : // Retrieves names of buckets used by object statistics tracking.
971 : bool GetObjectTypeName(size_t index, const char** object_type,
972 : const char** object_sub_type);
973 :
974 : // The total number of native context objects on the heap.
975 : size_t NumberOfNativeContexts();
976 : // The total number of native contexts that were detached but were not
977 : // garbage collected yet.
978 : size_t NumberOfDetachedContexts();
979 :
980 : // ===========================================================================
981 : // Code statistics. ==========================================================
982 : // ===========================================================================
983 :
984 : // Collect code (Code and BytecodeArray objects) statistics.
985 : void CollectCodeStatistics();
986 :
987 : // ===========================================================================
988 : // GC statistics. ============================================================
989 : // ===========================================================================
990 :
991 : // Returns the maximum amount of memory reserved for the heap.
992 : size_t MaxReserved();
993 : size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
994 : size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
995 : size_t MaxOldGenerationSize() { return max_old_generation_size_; }
996 :
997 : V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize(
998 : uint64_t physical_memory);
999 :
1000 : static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
1001 : const uint64_t min_physical_memory = 512 * MB;
1002 : const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB);
1003 :
1004 : uint64_t capped_physical_memory =
1005 : Max(Min(physical_memory, max_physical_memory), min_physical_memory);
1006 : // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
1007 : size_t semi_space_size_in_kb =
1008 29802 : static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
1009 29802 : (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
1010 : (max_physical_memory - min_physical_memory) +
1011 : kMinSemiSpaceSizeInKB);
1012 : return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
1013 : }
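 : // Worked example of the interpolation above, assuming a 64-bit build
 : // (kPointerMultiplier == 2, so the semi-space range is [1024 KB, 16384 KB])
 : // and 1 GB of physical memory:
 : //
 : //   (1024 MB - 512 MB) / (3072 MB - 512 MB) = 0.2
 : //   0.2 * (16384 KB - 1024 KB) + 1024 KB    = 4096 KB
 : //
 : // i.e. a 4 MB maximum semi-space size, before rounding up to a multiple of
 : // the page size.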
1014 :
1015 : // Returns the capacity of the heap in bytes w/o growing. Heap grows when
1016 : // more spaces are needed until it reaches the limit.
1017 : size_t Capacity();
1018 :
1019 : // Returns the capacity of the old generation.
1020 : size_t OldGenerationCapacity();
1021 :
1022 : // Returns the amount of memory currently held alive by the unmapper.
1023 : size_t CommittedMemoryOfUnmapper();
1024 :
1025 : // Returns the amount of memory currently committed for the heap.
1026 : size_t CommittedMemory();
1027 :
1028 : // Returns the amount of memory currently committed for the old space.
1029 : size_t CommittedOldGenerationMemory();
1030 :
1031 : // Returns the amount of executable memory currently committed for the heap.
1032 : size_t CommittedMemoryExecutable();
1033 :
1034 : // Returns the amount of physical memory currently committed for the heap.
1035 : size_t CommittedPhysicalMemory();
1036 :
1037 : // Returns the maximum amount of memory ever committed for the heap.
1038 : size_t MaximumCommittedMemory() { return maximum_committed_; }
1039 :
1040 : // Updates the maximum committed memory for the heap. Should be called
1041 : // whenever a space grows.
1042 : void UpdateMaximumCommitted();
1043 :
1044 : // Returns the available bytes in space w/o growing.
1045 : // Heap doesn't guarantee that it can allocate an object that requires
1046 : // all available bytes. Check MaxHeapObjectSize() instead.
1047 : size_t Available();
1048 :
1049 : // Returns the size of all objects residing in the heap.
1050 : size_t SizeOfObjects();
1051 :
1052 : void UpdateSurvivalStatistics(int start_new_space_size);
1053 :
1054 : inline void IncrementPromotedObjectsSize(size_t object_size) {
1055 111048 : promoted_objects_size_ += object_size;
1056 : }
1057 : inline size_t promoted_objects_size() { return promoted_objects_size_; }
1058 :
1059 : inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
1060 111048 : semi_space_copied_object_size_ += object_size;
1061 : }
1062 : inline size_t semi_space_copied_object_size() {
1063 : return semi_space_copied_object_size_;
1064 : }
1065 :
1066 : inline size_t SurvivedYoungObjectSize() {
1067 136854 : return promoted_objects_size_ + semi_space_copied_object_size_;
1068 : }
1069 :
1070 2511751 : inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
1071 :
1072 1518643 : inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
1073 :
1074 327870 : inline void IncrementNodesPromoted() { nodes_promoted_++; }
1075 :
1076 : inline void IncrementYoungSurvivorsCounter(size_t survived) {
1077 98087 : survived_last_scavenge_ = survived;
1078 98087 : survived_since_last_expansion_ += survived;
1079 : }
1080 :
1081 453619 : inline uint64_t OldGenerationObjectsAndPromotedExternalMemorySize() {
1082 907849 : return OldGenerationSizeOfObjects() + PromotedExternalMemorySize();
1083 : }
1084 :
1085 : inline void UpdateNewSpaceAllocationCounter();
1086 :
1087 : inline size_t NewSpaceAllocationCounter();
1088 :
1089 : // This should be used only for testing.
1090 : void set_new_space_allocation_counter(size_t new_value) {
1091 5 : new_space_allocation_counter_ = new_value;
1092 : }
1093 :
1094 : void UpdateOldGenerationAllocationCounter() {
1095 : old_generation_allocation_counter_at_last_gc_ =
1096 73955 : OldGenerationAllocationCounter();
1097 73955 : old_generation_size_at_last_gc_ = 0;
1098 : }
1099 :
1100 : size_t OldGenerationAllocationCounter() {
1101 230299 : return old_generation_allocation_counter_at_last_gc_ +
1102 230304 : PromotedSinceLastGC();
1103 : }
1104 :
1105 : // This should be used only for testing.
1106 : void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
1107 5 : old_generation_allocation_counter_at_last_gc_ = new_value;
1108 : }
1109 :
1110 : size_t PromotedSinceLastGC() {
1111 230304 : size_t old_generation_size = OldGenerationSizeOfObjects();
1112 : DCHECK_GE(old_generation_size, old_generation_size_at_last_gc_);
1113 230304 : return old_generation_size - old_generation_size_at_last_gc_;
1114 : }
1115 :
1116 : // This is called by the sweeper when it discovers more free space
1117 : // than expected at the end of the preceding GC.
1118 : void NotifyRefinedOldGenerationSize(size_t decreased_bytes) {
1119 11940 : if (old_generation_size_at_last_gc_ != 0) {
1120 : // OldGenerationSizeOfObjects() is now smaller by |decreased_bytes|.
1121 : // Adjust old_generation_size_at_last_gc_ too, so that PromotedSinceLastGC
1122 : // continues to increase monotonically, rather than decreasing here.
1123 : DCHECK_GE(old_generation_size_at_last_gc_, decreased_bytes);
1124 6373 : old_generation_size_at_last_gc_ -= decreased_bytes;
1125 : }
1126 : }
1127 :
1128 68384291 : int gc_count() const { return gc_count_; }
1129 :
1130 : bool is_current_gc_forced() const { return is_current_gc_forced_; }
1131 :
1132 : // Returns the size of objects residing in non-new spaces.
1133 : // Excludes external memory held by those objects.
1134 : size_t OldGenerationSizeOfObjects();
1135 :
1136 : // ===========================================================================
1137 : // Prologue/epilogue callback methods. =======================================
1138 : // ===========================================================================
1139 :
1140 : void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
1141 : GCType gc_type_filter, void* data);
1142 : void RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
1143 : void* data);
1144 :
1145 : void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
1146 : GCType gc_type_filter, void* data);
1147 : void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
1148 : void* data);
1149 :
1150 : void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
1151 : void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
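 : // A prologue callback sketch (v8::Isolate::GCCallbackWithData is declared
 : // in include/v8.h; the gc_type_filter restricts delivery to full GCs):
 : //
 : //   void OnGCStart(v8::Isolate* isolate, v8::GCType type,
 : //                  v8::GCCallbackFlags flags, void* data) {
 : //     // ... note that a mark-compact cycle is starting ...
 : //   }
 : //   heap->AddGCPrologueCallback(OnGCStart, kGCTypeMarkSweepCompact, nullptr);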
1152 :
1153 : // ===========================================================================
1154 : // Allocation methods. =======================================================
1155 : // ===========================================================================
1156 :
1157 : // Creates a filler object and returns a heap object immediately after it.
1158 : V8_WARN_UNUSED_RESULT HeapObject PrecedeWithFiller(HeapObject object,
1159 : int filler_size);
1160 :
1161 : // Creates a filler object if needed for alignment and returns a heap object
1162 : // immediately after it. If any space is left after the returned object,
1163 : // another filler object is created so the over allocated memory is iterable.
1164 : V8_WARN_UNUSED_RESULT HeapObject
1165 : AlignWithFiller(HeapObject object, int object_size, int allocation_size,
1166 : AllocationAlignment alignment);
1167 :
1168 : // ===========================================================================
1169 : // ArrayBuffer tracking. =====================================================
1170 : // ===========================================================================
1171 :
1172 : // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
1173 : // in the registration/unregistration APIs. Consider dropping the "New" from
1174 : // "RegisterNewArrayBuffer" because one can re-register a previously
1175 : // unregistered buffer, too, and the name is confusing.
1176 : void RegisterNewArrayBuffer(JSArrayBuffer buffer);
1177 : void UnregisterArrayBuffer(JSArrayBuffer buffer);
1178 :
1179 : // ===========================================================================
1180 : // Allocation site tracking. =================================================
1181 : // ===========================================================================
1182 :
1183 : // Updates the AllocationSite of a given {object}. The entry (including the
1184 : // count) is cached on the local pretenuring feedback.
1185 : inline void UpdateAllocationSite(
1186 : Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);
1187 :
1188 : // Merges local pretenuring feedback into the global one. Note that this
1189 : // method needs to be called after evacuation, as allocation sites may be
1190 : // evacuated and this method resolves forward pointers accordingly.
1191 : void MergeAllocationSitePretenuringFeedback(
1192 : const PretenuringFeedbackMap& local_pretenuring_feedback);
1193 :
1194 : // ===========================================================================
1195 : // Allocation tracking. ======================================================
1196 : // ===========================================================================
1197 :
1198 : // Adds {new_space_observer} to new space and {observer} to any other space.
1199 : void AddAllocationObserversToAllSpaces(
1200 : AllocationObserver* observer, AllocationObserver* new_space_observer);
1201 :
1202 : // Removes {new_space_observer} from new space and {observer} from any other
1203 : // space.
1204 : void RemoveAllocationObserversFromAllSpaces(
1205 : AllocationObserver* observer, AllocationObserver* new_space_observer);
1206 :
1207 : bool allocation_step_in_progress() { return allocation_step_in_progress_; }
1208 : void set_allocation_step_in_progress(bool val) {
1209 44705506 : allocation_step_in_progress_ = val;
1210 : }
1211 :
1212 : // ===========================================================================
1213 : // Heap object allocation tracking. ==========================================
1214 : // ===========================================================================
1215 :
1216 : void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
1217 : void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
1218 : bool has_heap_object_allocation_tracker() const {
1219 : return !allocation_trackers_.empty();
1220 : }
1221 :
1222 : // ===========================================================================
1223 : // Retaining path tracking. ==================================================
1224 : // ===========================================================================
1225 :
1226 : // Adds the given object to the weak table of retaining path targets.
1227 : // On each GC if the marker discovers the object, it will print the retaining
1228 : // path. This requires --track-retaining-path flag.
1229 : void AddRetainingPathTarget(Handle<HeapObject> object,
1230 : RetainingPathOption option);
1231 :
1232 : // ===========================================================================
1233 : // Stack frame support. ======================================================
1234 : // ===========================================================================
1235 :
1236 : // Returns the Code object for a given interior pointer.
1237 : Code GcSafeFindCodeForInnerPointer(Address inner_pointer);
1238 :
1239 : // Returns true if {addr} is contained within {code} and false otherwise.
1240 : // Mostly useful for debugging.
1241 : bool GcSafeCodeContains(Code code, Address addr);
1242 :
1243 : // =============================================================================
1244 : #ifdef VERIFY_HEAP
1245 : // Verify the heap is in its normal state before or after a GC.
1246 : void Verify();
1247 : void VerifyRememberedSetFor(HeapObject object);
1248 : #endif
1249 :
1250 : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1251 : void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
1252 : #endif
1253 :
1254 : #ifdef DEBUG
1255 : void VerifyCountersAfterSweeping();
1256 : void VerifyCountersBeforeConcurrentSweeping();
1257 :
1258 : void Print();
1259 : void PrintHandles();
1260 :
1261 : // Report code statistics.
1262 : void ReportCodeStatistics(const char* title);
1263 : #endif
1264 : void* GetRandomMmapAddr() {
1265 930618 : void* result = v8::internal::GetRandomMmapAddr();
1266 : #if V8_TARGET_ARCH_X64
1267 : #if V8_OS_MACOSX
1268 : // The Darwin kernel [as of macOS 10.12.5] does not clean up page
1269 : // directory entries [PDE] created from mmap or mach_vm_allocate, even
1270 : // after the region is destroyed. Using a virtual address space that is
1271 : // too large causes a leak of about 1 wired [can never be paged out] page
1272 : // per call to mmap(). The page is only reclaimed when the process is
1273 : // killed. Confine the hint to a 32-bit section of the virtual address
1274 : // space. See crbug.com/700928.
1275 : uintptr_t offset =
1276 : reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
1277 : kMmapRegionMask;
1278 : result = reinterpret_cast<void*>(mmap_region_base_ + offset);
1279 : #endif // V8_OS_MACOSX
1280 : #endif // V8_TARGET_ARCH_X64
1281 : return result;
1282 : }
1283 :
1284 : static const char* GarbageCollectionReasonToString(
1285 : GarbageCollectionReason gc_reason);
1286 :
1287 : // Calculates the number of entries for the full sized number to string cache.
1288 : inline int MaxNumberToStringCacheSize() const;
1289 :
1290 : private:
1291 : class SkipStoreBufferScope;
1292 :
1293 : typedef String (*ExternalStringTableUpdaterCallback)(Heap* heap,
1294 : FullObjectSlot pointer);
1295 :
1296 : // The external string table is where all external strings are
1297 : // registered. We need to keep track of such strings to properly
1298 : // finalize them.
1299 123038 : class ExternalStringTable {
1300 : public:
1301 61534 : explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
1302 :
1303 : // Registers an external string.
1304 : inline void AddString(String string);
1305 : bool Contains(String string);
1306 :
1307 : void IterateAll(RootVisitor* v);
1308 : void IterateYoung(RootVisitor* v);
1309 : void PromoteYoung();
1310 :
1311 : // Restores internal invariant and gets rid of collected strings. Must be
1312 : // called after each Iterate*() that modified the strings.
1313 : void CleanUpAll();
1314 : void CleanUpYoung();
1315 :
1316 : // Finalize all registered external strings and clear tables.
1317 : void TearDown();
1318 :
1319 : void UpdateYoungReferences(
1320 : Heap::ExternalStringTableUpdaterCallback updater_func);
1321 : void UpdateReferences(
1322 : Heap::ExternalStringTableUpdaterCallback updater_func);
1323 :
1324 : private:
1325 : void Verify();
1326 : void VerifyYoung();
1327 :
1328 : Heap* const heap_;
1329 :
1330 : // To speed up scavenge collections, young strings are kept separate from
1331 : // old strings.
1332 : std::vector<Object> young_strings_;
1333 : std::vector<Object> old_strings_;
1334 :
1335 : DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
1336 : };
1337 :
1338 : struct StrongRootsList;
1339 :
1340 : struct StringTypeTable {
1341 : InstanceType type;
1342 : int size;
1343 : RootIndex index;
1344 : };
1345 :
1346 : struct ConstantStringTable {
1347 : const char* contents;
1348 : RootIndex index;
1349 : };
1350 :
1351 : struct StructTable {
1352 : InstanceType type;
1353 : int size;
1354 : RootIndex index;
1355 : };
1356 :
1357 8417 : struct GCCallbackTuple {
1358 : GCCallbackTuple(v8::Isolate::GCCallbackWithData callback, GCType gc_type,
1359 : void* data)
1360 69951 : : callback(callback), gc_type(gc_type), data(data) {}
1361 :
1362 : bool operator==(const GCCallbackTuple& other) const;
1363 : GCCallbackTuple& operator=(const GCCallbackTuple& other) V8_NOEXCEPT;
1364 :
1365 : v8::Isolate::GCCallbackWithData callback;
1366 : GCType gc_type;
1367 : void* data;
1368 : };
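
// Each tuple corresponds to one embedder registration made through the
// public v8::Isolate API, e.g. (|MyCallback| and |user_data| are
// hypothetical):
//
//   isolate->AddGCPrologueCallback(MyCallback, user_data,
//                                  kGCTypeMarkSweepCompact);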
1369 :
1370 : static const int kInitialStringTableSize = StringTable::kMinCapacity;
1371 : static const int kInitialEvalCacheSize = 64;
1372 : static const int kInitialNumberStringCacheSize = 256;
1373 :
1374 : static const int kRememberedUnmappedPages = 128;
1375 :
1376 : static const StringTypeTable string_type_table[];
1377 : static const ConstantStringTable constant_string_table[];
1378 : static const StructTable struct_table[];
1379 :
1380 : static const int kYoungSurvivalRateHighThreshold = 90;
1381 : static const int kYoungSurvivalRateAllowedDeviation = 15;
1382 : static const int kOldSurvivalRateLowThreshold = 10;
1383 :
1384 : static const int kMaxMarkCompactsInIdleRound = 7;
1385 : static const int kIdleScavengeThreshold = 5;
1386 :
1387 : static const int kInitialFeedbackCapacity = 256;
1388 :
1389 : Heap();
1390 : ~Heap();
1391 :
1392 : static bool IsRegularObjectAllocation(AllocationType allocation) {
1393 : return AllocationType::kYoung == allocation ||
1394 : AllocationType::kOld == allocation;
1395 : }
1396 :
1397 0 : static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
1398 0 : return 0;
1399 : }
1400 :
1401 : #define ROOT_ACCESSOR(type, name, CamelName) inline void set_##name(type value);
1402 : ROOT_LIST(ROOT_ACCESSOR)
1403 : #undef ROOT_ACCESSOR
1404 :
1405 : StoreBuffer* store_buffer() { return store_buffer_.get(); }
1406 :
1407 : void set_current_gc_flags(int flags) {
1408 116994 : current_gc_flags_ = flags;
1409 : }
1410 :
1411 : inline bool ShouldReduceMemory() const {
1412 1126836 : return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
1413 : }
1414 :
1415 : int NumberOfScavengeTasks();
1416 :
1417 : // Checks whether a global GC is necessary.
1418 : GarbageCollector SelectGarbageCollector(AllocationSpace space,
1419 : const char** reason);
1420 :
1421 : // Make sure there is a filler value behind the top of the new space
1422 : // so that the GC does not confuse some uninitialized/stale memory
1423 : // with the allocation memento of the object at the top.
1424 : void EnsureFillerObjectAtTop();
1425 :
1426 : // Ensure that we have swept all spaces in such a way that we can iterate
1427 : // over all objects. May cause a GC.
1428 : void MakeHeapIterable();
1429 :
1430 : // Performs garbage collection.
1431 : // Returns whether there is a chance that another major GC could
1432 : // collect more garbage.
1433 : bool PerformGarbageCollection(
1434 : GarbageCollector collector,
1435 : const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1436 :
1437 : inline void UpdateOldSpaceLimits();
1438 :
1439 : bool CreateInitialMaps();
1440 : void CreateInternalAccessorInfoObjects();
1441 : void CreateInitialObjects();
1442 :
1443 : // Commits from space if it is uncommitted.
1444 : void EnsureFromSpaceIsCommitted();
1445 :
1446 : // Uncommit unused semi space.
1447 : bool UncommitFromSpace();
1448 :
1449 : // Fill in bogus values in from space
1450 : void ZapFromSpace();
1451 :
1452 : // Zaps the memory of a code object.
1453 : void ZapCodeObject(Address start_address, int size_in_bytes);
1454 :
1455 : // Deoptimizes all code that contains tenured or non-tenured allocation
1456 : // instructions. Moreover, it clears the pretenuring allocation site statistics.
1457 : void ResetAllAllocationSitesDependentCode(AllocationType allocation);
1458 :
1459 : // Evaluates local pretenuring for the old space and calls
1460 : // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
1461 : // the old space.
1462 : void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
1463 :
1464 : // Record statistics after garbage collection.
1465 : void ReportStatisticsAfterGC();
1466 :
1467 : // Flush the number to string cache.
1468 : void FlushNumberStringCache();
1469 :
1470 : void ConfigureInitialOldGenerationSize();
1471 :
1472 : bool HasLowYoungGenerationAllocationRate();
1473 : bool HasLowOldGenerationAllocationRate();
1474 : double YoungGenerationMutatorUtilization();
1475 : double OldGenerationMutatorUtilization();
1476 :
1477 : void ReduceNewSpaceSize();
1478 :
1479 : GCIdleTimeHeapState ComputeHeapState();
1480 :
1481 : bool PerformIdleTimeAction(GCIdleTimeAction action,
1482 : GCIdleTimeHeapState heap_state,
1483 : double deadline_in_ms);
1484 :
1485 : void IdleNotificationEpilogue(GCIdleTimeAction action,
1486 : GCIdleTimeHeapState heap_state, double start_ms,
1487 : double deadline_in_ms);
1488 :
1489 : int NextAllocationTimeout(int current_timeout = 0);
1490 : inline void UpdateAllocationsHash(HeapObject object);
1491 : inline void UpdateAllocationsHash(uint32_t value);
1492 : void PrintAllocationsHash();
1493 :
1494 : void PrintMaxMarkingLimitReached();
1495 : void PrintMaxNewSpaceSizeReached();
1496 :
1497 : int NextStressMarkingLimit();
1498 :
1499 : void AddToRingBuffer(const char* string);
1500 : void GetFromRingBuffer(char* buffer);
1501 :
1502 : void CompactRetainedMaps(WeakArrayList retained_maps);
1503 :
1504 : void CollectGarbageOnMemoryPressure();
1505 :
1506 : void EagerlyFreeExternalMemory();
1507 :
1508 : bool InvokeNearHeapLimitCallback();
1509 :
1510 : void ComputeFastPromotionMode();
1511 :
1512 : // Attempt to over-approximate the weak closure by marking object groups and
1513 : // implicit references from global handles, but don't atomically complete
1514 : // marking. If we continue to mark incrementally, we might have marked
1515 : // objects that die later.
1516 : void FinalizeIncrementalMarkingIncrementally(
1517 : GarbageCollectionReason gc_reason);
1518 :
1519 : // Returns the timer used for a given GC type.
1520 : // - GCScavenger: young generation GC
1521 : // - GCCompactor: full GC
1522 : // - GCFinalizeMC: finalization of incremental full GC
1523 : // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
1524 : // memory reduction
1525 : TimedHistogram* GCTypeTimer(GarbageCollector collector);
1526 : TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);
1527 :
1528 : // ===========================================================================
1529 : // Pretenuring. ==============================================================
1530 : // ===========================================================================
1531 :
1532 : // Pretenuring decisions are made based on feedback collected during new space
1533 : // evacuation. Note that between feedback collection and calling this method
1534 : // objects in old space must not move.
1535 : void ProcessPretenuringFeedback();
1536 :
1537 : // Removes an entry from the global pretenuring storage.
1538 : void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
1539 :
1540 : // ===========================================================================
1541 : // Actual GC. ================================================================
1542 : // ===========================================================================
1543 :
1544 : // Code that should be run before and after each GC. Includes some
1545 : // reporting/verification activities when compiled with DEBUG set.
1546 : void GarbageCollectionPrologue();
1547 : void GarbageCollectionEpilogue();
1548 :
1549 : // Performs a major collection in the whole heap.
1550 : void MarkCompact();
1551 : // Performs a minor collection of just the young generation.
1552 : void MinorMarkCompact();
1553 :
1554 : // Code to be run before and after mark-compact.
1555 : void MarkCompactPrologue();
1556 : void MarkCompactEpilogue();
1557 :
1558 : // Performs a minor collection in new generation.
1559 : void Scavenge();
1560 : void EvacuateYoungGeneration();
1561 :
1562 : void UpdateYoungReferencesInExternalStringTable(
1563 : ExternalStringTableUpdaterCallback updater_func);
1564 :
1565 : void UpdateReferencesInExternalStringTable(
1566 : ExternalStringTableUpdaterCallback updater_func);
1567 :
1568 : void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
1569 : void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
1570 : void ProcessNativeContexts(WeakObjectRetainer* retainer);
1571 : void ProcessAllocationSites(WeakObjectRetainer* retainer);
1572 : void ProcessWeakListRoots(WeakObjectRetainer* retainer);
1573 :
1574 : // ===========================================================================
1575 : // GC statistics. ============================================================
1576 : // ===========================================================================
1577 :
1578 226678 : inline size_t OldGenerationSpaceAvailable() {
1579 453356 : if (old_generation_allocation_limit_ <=
1580 226373 : OldGenerationObjectsAndPromotedExternalMemorySize())
1581 : return 0;
1582 225860 : return old_generation_allocation_limit_ -
1583 : static_cast<size_t>(
1584 225860 : OldGenerationObjectsAndPromotedExternalMemorySize());
1585 : }
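
// E.g. with a 256 MB limit and 300 MB of old-generation objects plus
// promoted external memory, the function above returns 0 instead of letting
// the unsigned subtraction underflow.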
1586 :
1587 : // We allow incremental marking to overshoot the allocation limit for
1588 : // performance reasons. If the overshoot is too large then we are more
1589 : // eager to finalize incremental marking.
1590 943 : inline bool AllocationLimitOvershotByLargeMargin() {
1591 : // This guards against too eager finalization in small heaps.
1592 : // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
1593 : size_t kMarginForSmallHeaps = 32u * MB;
1594 1886 : if (old_generation_allocation_limit_ >=
1595 943 : OldGenerationObjectsAndPromotedExternalMemorySize())
1596 : return false;
1597 748 : uint64_t overshoot = OldGenerationObjectsAndPromotedExternalMemorySize() -
1598 748 : old_generation_allocation_limit_;
1599 : // Overshoot margin is 50% of allocation limit or half-way to the max heap
1600 : // with special handling of small heaps.
1601 : uint64_t margin =
1602 748 : Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
1603 748 : (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
1604 748 : return overshoot >= margin;
1605 : }
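
// Worked example for the margin above (illustrative numbers): with
// old_generation_allocation_limit_ = 128 MB and max_old_generation_size_ =
// 512 MB,
//   margin = Min(Max(128 MB / 2, 32 MB), (512 MB - 128 MB) / 2)
//          = Min(64 MB, 192 MB) = 64 MB,
// so the overshoot counts as "large" once usage exceeds 192 MB.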
1606 :
1607 : void UpdateTotalGCTime(double duration);
1608 :
1609 94928 : bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
1610 :
1611 : bool IsIneffectiveMarkCompact(size_t old_generation_size,
1612 : double mutator_utilization);
1613 : void CheckIneffectiveMarkCompact(size_t old_generation_size,
1614 : double mutator_utilization);
1615 :
1616 : inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1617 : size_t amount);
1618 :
1619 : inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1620 : size_t amount);
1621 :
1622 : // ===========================================================================
1623 : // Growing strategy. =========================================================
1624 : // ===========================================================================
1625 :
1626 : HeapController* heap_controller() { return heap_controller_.get(); }
1627 : MemoryReducer* memory_reducer() { return memory_reducer_.get(); }
1628 :
1629 : // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
1630 : // This constant limits the effect of load RAIL mode on GC.
1631 : // The value is arbitrary and chosen as the largest load time observed in
1632 : // v8 browsing benchmarks.
1633 : static const int kMaxLoadTimeMs = 7000;
1634 :
1635 : bool ShouldOptimizeForLoadTime();
1636 :
1637 : size_t old_generation_allocation_limit() const {
1638 : return old_generation_allocation_limit_;
1639 : }
1640 :
1641 : bool always_allocate() { return always_allocate_scope_count_ != 0; }
1642 :
1643 : bool CanExpandOldGeneration(size_t size);
1644 :
1645 : bool ShouldExpandOldGenerationOnSlowAllocation();
1646 :
1647 : enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };
1648 :
1649 : HeapGrowingMode CurrentHeapGrowingMode();
1650 :
1651 : enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
1652 : IncrementalMarkingLimit IncrementalMarkingLimitReached();
1653 :
1654 : // ===========================================================================
1655 : // Idle notification. ========================================================
1656 : // ===========================================================================
1657 :
1658 : bool RecentIdleNotificationHappened();
1659 : void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
1660 :
1661 : // ===========================================================================
1662 : // HeapIterator helpers. =====================================================
1663 : // ===========================================================================
1664 :
1665 7820 : void heap_iterator_start() { heap_iterator_depth_++; }
1666 :
1667 7820 : void heap_iterator_end() { heap_iterator_depth_--; }
1668 :
1669 : bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
1670 :
1671 : // ===========================================================================
1672 : // Allocation methods. =======================================================
1673 : // ===========================================================================
1674 :
1675 : // Allocates a JS Map in the heap.
1676 : V8_WARN_UNUSED_RESULT AllocationResult
1677 : AllocateMap(InstanceType instance_type, int instance_size,
1678 : ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
1679 : int inobject_properties = 0);
1680 :
1681 : // Allocate an uninitialized object. The memory is non-executable if the
1682 : // hardware and OS allow. This is the single choke-point for allocations
1683 : // performed by the runtime and should not be bypassed (to extend this to
1684 : // inlined allocations, use the Heap::DisableInlineAllocation() support).
1685 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
1686 : int size_in_bytes, AllocationType allocation,
1687 : AllocationAlignment alignment = kWordAligned);
1688 :
1689 : // This method will try to perform an allocation of a given size of a given
1690 : // AllocationType. If the allocation fails, a regular full garbage collection
1691 : // is triggered and the allocation is retried. This is performed multiple
1692 : // times. If after that retry procedure the allocation still fails, nullptr is
1693 : // returned.
1694 : HeapObject AllocateRawWithLightRetry(
1695 : int size, AllocationType allocation,
1696 : AllocationAlignment alignment = kWordAligned);
1697 :
1698 : // This method will try to perform an allocation of a given size of a given
1699 : // AllocationType. If the allocation fails, a regular full garbage collection
1700 : // is triggered and the allocation is retried. This is performed multiple
1701 : // times. If after that retry procedure the allocation still fails, a "hammer"
1702 : // garbage collection is triggered which tries to significantly reduce memory.
1703 : // If the allocation still fails after that, a fatal error is thrown.
1704 : HeapObject AllocateRawWithRetryOrFail(
1705 : int size, AllocationType allocation,
1706 : AllocationAlignment alignment = kWordAligned);
1707 : HeapObject AllocateRawCodeInLargeObjectSpace(int size);
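
// The retry strategy described above, as a rough sketch; the retry count and
// collection calls are illustrative, not the exact implementation:
//
//   HeapObject result;
//   for (int i = 0; i < 2; i++) {
//     if (AllocateRaw(size, allocation, alignment).To(&result)) return result;
//     // Regular full GC between attempts.
//     CollectGarbage(OLD_SPACE, GarbageCollectionReason::kAllocationFailure);
//   }
//   // "Hammer" GC, then a last attempt (OrFail variant only):
//   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
//   if (AllocateRaw(size, allocation, alignment).To(&result)) return result;
//   FatalProcessOutOfMemory("...");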
1708 :
1709 : // Allocates a heap object based on the map.
1710 : V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
1711 : AllocationType allocation);
1712 :
1713 : // Takes a code object and checks if it is on memory which is not subject to
1714 : // compaction. This method will return a new code object on an immovable
1715 : // memory location if the original code object was movable.
1716 : HeapObject EnsureImmovableCode(HeapObject heap_object, int object_size);
1717 :
1718 : // Allocates a partial map for bootstrapping.
1719 : V8_WARN_UNUSED_RESULT AllocationResult
1720 : AllocatePartialMap(InstanceType instance_type, int instance_size);
1721 :
1722 : void FinalizePartialMap(Map map);
1723 :
1724 : // Allocate empty fixed typed array of given type.
1725 : V8_WARN_UNUSED_RESULT AllocationResult
1726 : AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
1727 :
1728 70 : void set_force_oom(bool value) { force_oom_ = value; }
1729 :
1730 : // ===========================================================================
1731 : // Retaining path tracing ====================================================
1732 : // ===========================================================================
1733 :
1734 : void AddRetainer(HeapObject retainer, HeapObject object);
1735 : void AddEphemeronRetainer(HeapObject retainer, HeapObject object);
1736 : void AddRetainingRoot(Root root, HeapObject object);
1737 : // Returns true if the given object is a target of retaining path tracking.
1738 : // Stores the option corresponding to the object in the provided *option.
1739 : bool IsRetainingPathTarget(HeapObject object, RetainingPathOption* option);
1740 : void PrintRetainingPath(HeapObject object, RetainingPathOption option);
1741 :
1742 : #ifdef DEBUG
1743 : void IncrementObjectCounters();
1744 : #endif // DEBUG
1745 :
1746 : // The amount of memory that has been freed concurrently.
1747 : std::atomic<intptr_t> external_memory_concurrently_freed_{0};
1748 :
1749 : // This can be calculated directly from a pointer to the heap; however, it is
1750 : // more expedient to get at the isolate directly from within Heap methods.
1751 : Isolate* isolate_ = nullptr;
1752 :
1753 : size_t code_range_size_ = 0;
1754 : size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
1755 : size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
1756 : size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
1757 : size_t initial_max_old_generation_size_;
1758 : size_t initial_max_old_generation_size_threshold_;
1759 : size_t initial_old_generation_size_;
1760 : bool old_generation_size_configured_ = false;
1761 : size_t maximum_committed_ = 0;
1762 : size_t old_generation_capacity_after_bootstrap_ = 0;
1763 :
1764 : // Backing store bytes (array buffers and external strings).
1765 : std::atomic<size_t> backing_store_bytes_{0};
1766 :
1767 : // For keeping track of how much data has survived
1768 : // scavenge since last new space expansion.
1769 : size_t survived_since_last_expansion_ = 0;
1770 :
1771 : // ... and since the last scavenge.
1772 : size_t survived_last_scavenge_ = 0;
1773 :
1774 : // This is not the depth of nested AlwaysAllocateScope's but rather a single
1775 : // count, as scopes can be acquired from multiple tasks (read: threads).
1776 : std::atomic<size_t> always_allocate_scope_count_{0};
1777 :
1778 : // Stores the memory pressure level that was set by MemoryPressureNotification
1779 : // and reset by a mark-compact garbage collection.
1780 : std::atomic<MemoryPressureLevel> memory_pressure_level_;
1781 :
1782 : std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
1783 : near_heap_limit_callbacks_;
1784 :
1785 : // For keeping track of context disposals.
1786 : int contexts_disposed_ = 0;
1787 :
1788 : // The length of the retained_maps array at the time of context disposal.
1789 : // This separates maps in the retained_maps array that were created before
1790 : // and after context disposal.
1791 : int number_of_disposed_maps_ = 0;
1792 :
1793 : ReadOnlyHeap* read_only_heap_ = nullptr;
1794 :
1795 : NewSpace* new_space_ = nullptr;
1796 : OldSpace* old_space_ = nullptr;
1797 : CodeSpace* code_space_ = nullptr;
1798 : MapSpace* map_space_ = nullptr;
1799 : LargeObjectSpace* lo_space_ = nullptr;
1800 : CodeLargeObjectSpace* code_lo_space_ = nullptr;
1801 : NewLargeObjectSpace* new_lo_space_ = nullptr;
1802 : ReadOnlySpace* read_only_space_ = nullptr;
1803 : // Map from the space id to the space.
1804 : Space* space_[LAST_SPACE + 1];
1805 :
1806 : // Determines whether code space is write-protected. This is essentially a
1807 : // race-free copy of the {FLAG_write_protect_code_memory} flag.
1808 : bool write_protect_code_memory_ = false;
1809 :
1810 : // Holds the number of open CodeSpaceMemoryModificationScopes.
1811 : uintptr_t code_space_memory_modification_scope_depth_ = 0;
1812 :
1813 : HeapState gc_state_ = NOT_IN_GC;
1814 :
1815 : int gc_post_processing_depth_ = 0;
1816 :
1817 : // Returns the amount of external memory registered since last global gc.
1818 : uint64_t PromotedExternalMemorySize();
1819 :
1820 : // How many "runtime allocations" happened.
1821 : uint32_t allocations_count_ = 0;
1822 :
1823 : // Running hash over allocations performed.
1824 : uint32_t raw_allocations_hash_ = 0;
1825 :
1826 : // Starts marking when stress_marking_percentage_% of the marking start limit
1827 : // is reached.
1828 : int stress_marking_percentage_ = 0;
1829 :
1830 : // Observer that causes more frequent checks for reached incremental marking
1831 : // limit.
1832 : AllocationObserver* stress_marking_observer_ = nullptr;
1833 :
1834 : // Observer that can cause early scavenge start.
1835 : StressScavengeObserver* stress_scavenge_observer_ = nullptr;
1836 :
1837 : bool allocation_step_in_progress_ = false;
1838 :
1839 : // The maximum percent of the marking limit reached without causing marking.
1840 : // This is tracked when specifying --fuzzer-gc-analysis.
1841 : double max_marking_limit_reached_ = 0.0;
1842 :
1843 : // How many mark-sweep collections happened.
1844 : unsigned int ms_count_ = 0;
1845 :
1846 : // How many gc happened.
1847 : unsigned int gc_count_ = 0;
1848 :
1849 : // The number of Mark-Compact garbage collections that are considered as
1850 : // ineffective. See IsIneffectiveMarkCompact() predicate.
1851 : int consecutive_ineffective_mark_compacts_ = 0;
1852 :
1853 : static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
1854 : uintptr_t mmap_region_base_ = 0;
1855 :
1856 : // For post mortem debugging.
1857 : int remembered_unmapped_pages_index_ = 0;
1858 : Address remembered_unmapped_pages_[kRememberedUnmappedPages];
1859 :
1860 : // Limit that triggers a global GC on the next (normally caused) GC. This
1861 : // is checked when we have already decided to do a GC to help determine
1862 : // which collector to invoke, before expanding a paged space in the old
1863 : // generation and on every allocation in large object space.
1864 : size_t old_generation_allocation_limit_;
1865 :
1866 : // Indicates that inline bump-pointer allocation has been globally disabled
1867 : // for all spaces. This is used to disable allocations in generated code.
1868 : bool inline_allocation_disabled_ = false;
1869 :
1870 : // Weak list heads, threaded through the objects.
1871 : // List heads are initialized lazily and contain the undefined_value at start.
1872 : Object native_contexts_list_;
1873 : Object allocation_sites_list_;
1874 :
1875 : std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
1876 : std::vector<GCCallbackTuple> gc_prologue_callbacks_;
1877 :
1878 : GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;
1879 :
1880 : int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
1881 :
1882 : size_t promoted_objects_size_ = 0;
1883 : double promotion_ratio_ = 0.0;
1884 : double promotion_rate_ = 0.0;
1885 : size_t semi_space_copied_object_size_ = 0;
1886 : size_t previous_semi_space_copied_object_size_ = 0;
1887 : double semi_space_copied_rate_ = 0.0;
1888 : int nodes_died_in_new_space_ = 0;
1889 : int nodes_copied_in_new_space_ = 0;
1890 : int nodes_promoted_ = 0;
1891 :
1892 : // This is the pretenuring trigger for allocation sites that are in maybe
1893 : // tenure state. When we switch to the maximum new space size we deoptimize
1894 : // the code that belongs to the allocation site and derive the lifetime
1895 : // of the allocation site.
1896 : unsigned int maximum_size_scavenges_ = 0;
1897 :
1898 : // Total time spent in GC.
1899 : double total_gc_time_ms_;
1900 :
1901 : // Last time an idle notification happened.
1902 : double last_idle_notification_time_ = 0.0;
1903 :
1904 : // Last time a garbage collection happened.
1905 : double last_gc_time_ = 0.0;
1906 :
1907 : std::unique_ptr<GCTracer> tracer_;
1908 : std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
1909 : MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
1910 : std::unique_ptr<ScavengerCollector> scavenger_collector_;
1911 : std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
1912 : std::unique_ptr<MemoryAllocator> memory_allocator_;
1913 : std::unique_ptr<StoreBuffer> store_buffer_;
1914 : std::unique_ptr<HeapController> heap_controller_;
1915 : std::unique_ptr<IncrementalMarking> incremental_marking_;
1916 : std::unique_ptr<ConcurrentMarking> concurrent_marking_;
1917 : std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
1918 : std::unique_ptr<MemoryReducer> memory_reducer_;
1919 : std::unique_ptr<ObjectStats> live_object_stats_;
1920 : std::unique_ptr<ObjectStats> dead_object_stats_;
1921 : std::unique_ptr<ScavengeJob> scavenge_job_;
1922 : std::unique_ptr<AllocationObserver> idle_scavenge_observer_;
1923 : std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
1924 : StrongRootsList* strong_roots_list_ = nullptr;
1925 :
1926 : // This counter is increased before each GC and never reset.
1927 : // To account for the bytes allocated since the last GC, use the
1928 : // NewSpaceAllocationCounter() function.
1929 : size_t new_space_allocation_counter_ = 0;
1930 :
1931 : // This counter is increased before each GC and never reset. To
1932 : // account for the bytes allocated since the last GC, use the
1933 : // OldGenerationAllocationCounter() function.
1934 : size_t old_generation_allocation_counter_at_last_gc_ = 0;
1935 :
1936 : // The size of objects in old generation after the last MarkCompact GC.
1937 : size_t old_generation_size_at_last_gc_ = 0;
1938 :
1939 : // The feedback storage is used to store allocation sites (keys) and how often
1940 : // they have been visited (values) by finding a memento behind an object. The
1941 : // storage is only alive temporarily during a GC. The invariant is that all
1942 : // pointers in this map are already fixed, i.e., they do not point to
1943 : // forwarding pointers.
1944 : PretenuringFeedbackMap global_pretenuring_feedback_;
1945 :
1946 : char trace_ring_buffer_[kTraceRingBufferSize];
1947 :
1948 : // Used as boolean.
1949 : uint8_t is_marking_flag_ = 0;
1950 :
1951 : // If it's not full then the data is from 0 to ring_buffer_end_. If it's
1952 : // full then the data is from ring_buffer_end_ to the end of the buffer and
1953 : // from 0 to ring_buffer_end_.
1954 : bool ring_buffer_full_ = false;
1955 : size_t ring_buffer_end_ = 0;
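
// GetFromRingBuffer() reconstructs the trace in that order; a sketch of the
// implied copy logic (simplified, bounds handling elided):
//
//   size_t copied = 0;
//   if (ring_buffer_full_) {
//     // Older half first: [ring_buffer_end_, kTraceRingBufferSize).
//     copied = kTraceRingBufferSize - ring_buffer_end_;
//     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
//   }
//   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);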
1956 :
1957 : // Flag is set when the heap has been configured. The heap can be repeatedly
1958 : // configured through the API until it is set up.
1959 : bool configured_ = false;
1960 :
1961 : // Currently set GC flags that are respected by all GC components.
1962 : int current_gc_flags_ = Heap::kNoGCFlags;
1963 :
1964 : // Currently set GC callback flags that are used to pass information between
1965 : // the embedder and V8's GC.
1966 : GCCallbackFlags current_gc_callback_flags_;
1967 :
1968 : bool is_current_gc_forced_;
1969 :
1970 : ExternalStringTable external_string_table_;
1971 :
1972 : base::Mutex relocation_mutex_;
1973 :
1974 : int gc_callbacks_depth_ = 0;
1975 :
1976 : bool deserialization_complete_ = false;
1977 :
1978 : // The depth of HeapIterator nestings.
1979 : int heap_iterator_depth_ = 0;
1980 :
1981 : bool fast_promotion_mode_ = false;
1982 :
1983 : // Used for testing purposes.
1984 : bool force_oom_ = false;
1985 : bool delay_sweeper_tasks_for_testing_ = false;
1986 :
1987 : HeapObject pending_layout_change_object_;
1988 :
1989 : base::Mutex unprotected_memory_chunks_mutex_;
1990 : std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
1991 : bool unprotected_memory_chunks_registry_enabled_ = false;
1992 :
1993 : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1994 : // If the --gc-interval flag is set to a positive value, this
1995 : // variable holds the value indicating the number of allocations
1996 : // that remain until the next failure and garbage collection.
1997 : int allocation_timeout_ = 0;
1998 : #endif // V8_ENABLE_ALLOCATION_TIMEOUT
1999 :
2000 : std::map<HeapObject, HeapObject, Object::Comparer> retainer_;
2001 : std::map<HeapObject, Root, Object::Comparer> retaining_root_;
2002 : // If an object is retained by an ephemeron, then the retaining key of the
2003 : // ephemeron is stored in this map.
2004 : std::map<HeapObject, HeapObject, Object::Comparer> ephemeron_retainer_;
2005 : // For each index in the retaining_path_targets_ array this map
2006 : // stores the option of the corresponding target.
2007 : std::map<int, RetainingPathOption> retaining_path_target_option_;
2008 :
2009 : std::vector<HeapObjectAllocationTracker*> allocation_trackers_;
2010 :
2011 : // Classes in "heap" can be friends.
2012 : friend class AlwaysAllocateScope;
2013 : friend class ArrayBufferCollector;
2014 : friend class ConcurrentMarking;
2015 : friend class GCCallbacksScope;
2016 : friend class GCTracer;
2017 : friend class MemoryController;
2018 : friend class HeapIterator;
2019 : friend class IdleScavengeObserver;
2020 : friend class IncrementalMarking;
2021 : friend class IncrementalMarkingJob;
2022 : friend class LargeObjectSpace;
2023 : template <FixedArrayVisitationMode fixed_array_mode,
2024 : TraceRetainingPathMode retaining_path_mode, typename MarkingState>
2025 : friend class MarkingVisitor;
2026 : friend class MarkCompactCollector;
2027 : friend class MarkCompactCollectorBase;
2028 : friend class MinorMarkCompactCollector;
2029 : friend class NewLargeObjectSpace;
2030 : friend class NewSpace;
2031 : friend class ObjectStatsCollector;
2032 : friend class Page;
2033 : friend class PagedSpace;
2034 : friend class ReadOnlyRoots;
2035 : friend class Scavenger;
2036 : friend class ScavengerCollector;
2037 : friend class Space;
2038 : friend class StoreBuffer;
2039 : friend class Sweeper;
2040 : friend class heap::TestMemoryAllocatorScope;
2041 :
2042 : // The allocator interface.
2043 : friend class Factory;
2044 :
2045 : // The Isolate constructs us.
2046 : friend class Isolate;
2047 :
2048 : // Used in cctest.
2049 : friend class heap::HeapTester;
2050 :
2051 : FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
2052 : FRIEND_TEST(HeapTest, ExternalLimitDefault);
2053 : FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
2054 : DISALLOW_COPY_AND_ASSIGN(Heap);
2055 : };
2056 :
2057 :
2058 : class HeapStats {
2059 : public:
2060 : static const int kStartMarker = 0xDECADE00;
2061 : static const int kEndMarker = 0xDECADE01;
2062 :
2063 : intptr_t* start_marker; // 0
2064 : size_t* ro_space_size; // 1
2065 : size_t* ro_space_capacity; // 2
2066 : size_t* new_space_size; // 3
2067 : size_t* new_space_capacity; // 4
2068 : size_t* old_space_size; // 5
2069 : size_t* old_space_capacity; // 6
2070 : size_t* code_space_size; // 7
2071 : size_t* code_space_capacity; // 8
2072 : size_t* map_space_size; // 9
2073 : size_t* map_space_capacity; // 10
2074 : size_t* lo_space_size; // 11
2075 : size_t* code_lo_space_size; // 12
2076 : size_t* global_handle_count; // 13
2077 : size_t* weak_global_handle_count; // 14
2078 : size_t* pending_global_handle_count; // 15
2079 : size_t* near_death_global_handle_count; // 16
2080 : size_t* free_global_handle_count; // 17
2081 : size_t* memory_allocator_size; // 18
2082 : size_t* memory_allocator_capacity; // 19
2083 : size_t* malloced_memory; // 20
2084 : size_t* malloced_peak_memory; // 21
2085 : size_t* objects_per_type; // 22
2086 : size_t* size_per_type; // 23
2087 : int* os_error; // 24
2088 : char* last_few_messages; // 25
2089 : char* js_stacktrace; // 26
2090 : intptr_t* end_marker; // 27
2091 : };
2092 :
2093 :
2094 : class AlwaysAllocateScope {
2095 : public:
2096 : explicit inline AlwaysAllocateScope(Heap* heap);
2097 : explicit inline AlwaysAllocateScope(Isolate* isolate);
2098 : inline ~AlwaysAllocateScope();
2099 :
2100 : private:
2101 : Heap* heap_;
2102 : };
2103 :
2104 : // The CodeSpaceMemoryModificationScope can only be used by the main thread.
2105 : class CodeSpaceMemoryModificationScope {
2106 : public:
2107 : explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
2108 : inline ~CodeSpaceMemoryModificationScope();
2109 :
2110 : private:
2111 : Heap* heap_;
2112 : };
2113 :
2114 : // The CodePageCollectionMemoryModificationScope can only be used by the main
2115 : // thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
2116 : // already active.
2117 : class CodePageCollectionMemoryModificationScope {
2118 : public:
2119 : explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
2120 : inline ~CodePageCollectionMemoryModificationScope();
2121 :
2122 : private:
2123 : Heap* heap_;
2124 : };
2125 :
2126 : // The CodePageMemoryModificationScope does not check if transitions to
2127 : // writable and back to executable are actually allowed, i.e. the MemoryChunk
2128 : // was registered to be executable. It can be used by concurrent threads.
2129 : class CodePageMemoryModificationScope {
2130 : public:
2131 : explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
2132 : inline ~CodePageMemoryModificationScope();
2133 :
2134 : private:
2135 : MemoryChunk* chunk_;
2136 : bool scope_active_;
2137 :
2138 : // Disallow any GCs inside this scope, as a relocation of the underlying
2139 : // object would change the {MemoryChunk} that this scope targets.
2140 : DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
2141 : };
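
// A minimal usage sketch (|chunk| is assumed to be an executable
// MemoryChunk):
//
//   {
//     CodePageMemoryModificationScope modification_scope(chunk);
//     // ... write to code objects on |chunk| ...
//   }  // protections are restored when the scope ends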
2142 :
2143 : // Visitor class to verify interior pointers in spaces that do not contain
2144 : // or care about intergenerational references. All heap object pointers have to
2145 : // point into the heap to a location that has a map pointer at its first word.
2146 : // Caveat: Heap::Contains is an approximation because it can return true for
2147 : // objects in a heap space but above the allocation pointer.
2148 0 : class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
2149 : public:
2150 : explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
2151 : void VisitPointers(HeapObject host, ObjectSlot start,
2152 : ObjectSlot end) override;
2153 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
2154 : MaybeObjectSlot end) override;
2155 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
2156 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
2157 :
2158 : void VisitRootPointers(Root root, const char* description,
2159 : FullObjectSlot start, FullObjectSlot end) override;
2160 :
2161 : protected:
2162 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
2163 :
2164 : template <typename TSlot>
2165 : V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);
2166 :
2167 : virtual void VerifyPointers(HeapObject host, MaybeObjectSlot start,
2168 : MaybeObjectSlot end);
2169 :
2170 : Heap* heap_;
2171 : };
2172 :
2173 :
2174 : // Verify that all objects are Smis.
2175 0 : class VerifySmisVisitor : public RootVisitor {
2176 : public:
2177 : void VisitRootPointers(Root root, const char* description,
2178 : FullObjectSlot start, FullObjectSlot end) override;
2179 : };
2180 :
2181 : // Space iterator for iterating over all the paged spaces of the heap: Map
2182 : // space, old space, code space and optionally read only space. Returns each
2183 : // space in turn, and null when it is done.
2184 : class V8_EXPORT_PRIVATE PagedSpaces {
2185 : public:
2186 : enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };
2187 :
2188 : explicit PagedSpaces(Heap* heap, SpacesSpecifier specifier =
2189 : SpacesSpecifier::kSweepablePagedSpaces)
2190 : : heap_(heap),
2191 : counter_(specifier == SpacesSpecifier::kAllPagedSpaces ? RO_SPACE
2192 5403339 : : OLD_SPACE) {}
2193 : PagedSpace* next();
2194 :
2195 : private:
2196 : Heap* heap_;
2197 : int counter_;
2198 : };
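
// A minimal usage sketch (|heap| is assumed):
//
//   PagedSpaces spaces(heap);
//   for (PagedSpace* space = spaces.next(); space != nullptr;
//        space = spaces.next()) {
//     // ... process |space| ...
//   }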
2199 :
2200 :
2201 234385 : class SpaceIterator : public Malloced {
2202 : public:
2203 : explicit SpaceIterator(Heap* heap);
2204 : virtual ~SpaceIterator();
2205 :
2206 : bool has_next();
2207 : Space* next();
2208 :
2209 : private:
2210 : Heap* heap_;
2211 : int current_space_; // from enum AllocationSpace.
2212 : };
2213 :
2214 :
2215 : // A HeapIterator provides iteration over the whole heap. It
2216 : // aggregates the specific iterators for the different spaces as
2217 : // these can each iterate over only one space.
2218 : //
2219 : // HeapIterator ensures there is no allocation during its lifetime
2220 : // (using an embedded DisallowHeapAllocation instance).
2221 : //
2222 : // HeapIterator can skip free list nodes (that is, de-allocated heap
2223 : // objects that still remain in the heap). As implementation of free
2224 : // nodes filtering uses GC marks, it can't be used during MS/MC GC
2225 : // phases. Also, it is forbidden to interrupt iteration in this mode,
2226 : // as this will leave heap objects marked (and thus, unusable).
2227 : class HeapIterator {
2228 : public:
2229 : enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
2230 :
2231 : explicit HeapIterator(Heap* heap,
2232 : HeapObjectsFiltering filtering = kNoFiltering);
2233 : ~HeapIterator();
2234 :
2235 : HeapObject next();
2236 :
2237 : private:
2238 : HeapObject NextObject();
2239 :
2240 : DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
2241 :
2242 : Heap* heap_;
2243 : HeapObjectsFiltering filtering_;
2244 : HeapObjectsFilter* filter_;
2245 : // Space iterator for iterating all the spaces.
2246 : SpaceIterator* space_iterator_;
2247 : // Object iterator for the space currently being iterated.
2248 : std::unique_ptr<ObjectIterator> object_iterator_;
2249 : };
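
// A typical traversal, sketched (|heap| is assumed; allocation is disallowed
// while the iterator is alive):
//
//   HeapIterator iterator(heap);
//   for (HeapObject obj = iterator.next(); !obj.is_null();
//        obj = iterator.next()) {
//     // ... inspect |obj| ...
//   }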
2250 :
2251 : // Abstract base class for checking whether a weak object should be retained.
2252 73955 : class WeakObjectRetainer {
2253 : public:
2254 168883 : virtual ~WeakObjectRetainer() = default;
2255 :
2256 : // Return whether this object should be retained. If nullptr is returned the
2257 : // object has no references. Otherwise the address of the retained object
2258 : // should be returned as in some GC situations the object has been moved.
2259 : virtual Object RetainAs(Object object) = 0;
2260 : };
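
// A sketch of a trivial retainer (hypothetical; real retainers live in the
// GC): returning the (possibly moved) object keeps it, while a null result
// drops it per the contract above.
//
//   class RetainEverything final : public WeakObjectRetainer {
//    public:
//     Object RetainAs(Object object) override { return object; }
//   };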
2261 :
2262 : // -----------------------------------------------------------------------------
2263 : // Allows observation of allocations.
2264 : class AllocationObserver {
2265 : public:
2266 : explicit AllocationObserver(intptr_t step_size)
2267 184767 : : step_size_(step_size), bytes_to_next_step_(step_size) {
2268 : DCHECK_LE(kTaggedSize, step_size);
2269 : }
2270 184722 : virtual ~AllocationObserver() = default;
2271 :
2272 : // Called each time the observed space does an allocation step. This may be
2273 : // more frequent than the step_size we are monitoring (e.g. when there are
2274 : // multiple observers, or when a page or space boundary is encountered).
2275 : void AllocationStep(int bytes_allocated, Address soon_object, size_t size);
2276 :
2277 : protected:
2278 : intptr_t step_size() const { return step_size_; }
2279 : intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
2280 :
2281 : // Pure virtual method provided by the subclasses that gets called when at
2282 : // least step_size bytes have been allocated. soon_object is the address just
2283 : // allocated (but not yet initialized). size is the size of the object as
2284 : // requested (i.e. w/o the alignment fillers). Some complexities to be aware
2285 : // of:
2286 : // 1) soon_object will be nullptr in cases where we end up observing an
2287 : // allocation that happens to be a filler space (e.g. page boundaries).
2288 : // 2) size is the requested size at the time of allocation. Right-trimming
2289 : // may change the object size dynamically.
2290 : // 3) soon_object may actually be the first object in an allocation-folding
2291 : // group. In such a case size is the size of the group rather than the
2292 : // first object.
2293 : virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
2294 :
2295 : // Subclasses can override this method to make step size dynamic.
2296 56504 : virtual intptr_t GetNextStepSize() { return step_size_; }
2297 :
2298 : intptr_t step_size_;
2299 : intptr_t bytes_to_next_step_;
2300 :
2301 : private:
2302 : friend class Space;
2303 : DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
2304 : };
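
// A sketch of a concrete observer (hypothetical): report roughly every 1 KB
// of allocation in the observed space.
//
//   class LoggingObserver final : public AllocationObserver {
//    public:
//     LoggingObserver() : AllocationObserver(1024) {}
//
//    protected:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       // soon_object may be null for filler-only steps (see above).
//       PrintF("observed %d allocated bytes\n", bytes_allocated);
//     }
//   };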
2305 :
2306 : V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
2307 :
2308 : // -----------------------------------------------------------------------------
2309 : // Allows observation of heap object allocations.
2310 69671 : class HeapObjectAllocationTracker {
2311 : public:
2312 : virtual void AllocationEvent(Address addr, int size) = 0;
2313 14155 : virtual void MoveEvent(Address from, Address to, int size) {}
2314 2887 : virtual void UpdateObjectSizeEvent(Address addr, int size) {}
2315 69658 : virtual ~HeapObjectAllocationTracker() = default;
2316 : };
2317 :
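// Returns the current location of |heap_obj| during a scavenge: its
// forwarding target if it was moved, a null T if it died in the from-page,
// and the object itself otherwise.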
2318 : template <typename T>
2319 230981 : T ForwardingAddress(T heap_obj) {
2320 : MapWord map_word = heap_obj->map_word();
2321 :
2322 230981 : if (map_word.IsForwardingAddress()) {
2323 : return T::cast(map_word.ToForwardingAddress());
2324 205667 : } else if (Heap::InFromPage(heap_obj)) {
2325 2277 : return T();
2326 : } else {
2327 : // TODO(ulan): Support minor mark-compactor here.
2328 203390 : return heap_obj;
2329 : }
2330 : }
2331 :
2332 : } // namespace internal
2333 : } // namespace v8
2334 :
2335 : #endif // V8_HEAP_HEAP_H_
|