LCOV - code coverage report
Current view: top level - src/heap - heap.cc (source / functions)
Test: app.info
Date: 2019-04-19
            Hit    Total   Coverage
Lines:      1735   2261    76.7 %
Functions:  239    320     74.7 %

          Line data    Source code
       1             : // Copyright 2012 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/heap.h"
       6             : 
       7             : #include <unordered_map>
       8             : #include <unordered_set>
       9             : 
      10             : #include "src/accessors.h"
      11             : #include "src/api-inl.h"
      12             : #include "src/assembler-inl.h"
      13             : #include "src/base/bits.h"
      14             : #include "src/base/once.h"
      15             : #include "src/base/utils/random-number-generator.h"
      16             : #include "src/bootstrapper.h"
      17             : #include "src/compilation-cache.h"
      18             : #include "src/conversions.h"
      19             : #include "src/debug/debug.h"
      20             : #include "src/deoptimizer.h"
      21             : #include "src/feedback-vector.h"
      22             : #include "src/global-handles.h"
      23             : #include "src/heap/array-buffer-collector.h"
      24             : #include "src/heap/array-buffer-tracker-inl.h"
      25             : #include "src/heap/barrier.h"
      26             : #include "src/heap/code-stats.h"
      27             : #include "src/heap/concurrent-marking.h"
      28             : #include "src/heap/embedder-tracing.h"
      29             : #include "src/heap/gc-idle-time-handler.h"
      30             : #include "src/heap/gc-tracer.h"
      31             : #include "src/heap/heap-controller.h"
      32             : #include "src/heap/heap-write-barrier-inl.h"
      33             : #include "src/heap/incremental-marking.h"
      34             : #include "src/heap/mark-compact-inl.h"
      35             : #include "src/heap/mark-compact.h"
      36             : #include "src/heap/memory-reducer.h"
      37             : #include "src/heap/object-stats.h"
      38             : #include "src/heap/objects-visiting-inl.h"
      39             : #include "src/heap/objects-visiting.h"
      40             : #include "src/heap/read-only-heap.h"
      41             : #include "src/heap/remembered-set.h"
      42             : #include "src/heap/scavenge-job.h"
      43             : #include "src/heap/scavenger-inl.h"
      44             : #include "src/heap/store-buffer.h"
      45             : #include "src/heap/stress-marking-observer.h"
      46             : #include "src/heap/stress-scavenge-observer.h"
      47             : #include "src/heap/sweeper.h"
      48             : #include "src/interpreter/interpreter.h"
      49             : #include "src/log.h"
      50             : #include "src/microtask-queue.h"
      51             : #include "src/objects/data-handler.h"
      52             : #include "src/objects/free-space-inl.h"
      53             : #include "src/objects/hash-table-inl.h"
      54             : #include "src/objects/maybe-object.h"
      55             : #include "src/objects/shared-function-info.h"
      56             : #include "src/objects/slots-inl.h"
      57             : #include "src/regexp/jsregexp.h"
      58             : #include "src/runtime-profiler.h"
      59             : #include "src/snapshot/embedded-data.h"
      60             : #include "src/snapshot/natives.h"
      61             : #include "src/snapshot/serializer-common.h"
      62             : #include "src/snapshot/snapshot.h"
      63             : #include "src/string-stream.h"
      64             : #include "src/tracing/trace-event.h"
      65             : #include "src/unicode-decoder.h"
      66             : #include "src/unicode-inl.h"
      67             : #include "src/utils-inl.h"
      68             : #include "src/utils.h"
      69             : #include "src/v8.h"
      70             : #include "src/v8threads.h"
      71             : #include "src/vm-state-inl.h"
      72             : 
      73             : // Has to be the last include (doesn't have include guards):
      74             : #include "src/objects/object-macros.h"
      75             : 
      76             : namespace v8 {
      77             : namespace internal {
      78             : 
      79             : // These are outside the Heap class so they can be forward-declared
      80             : // in heap-write-barrier-inl.h.
      81           0 : bool Heap_PageFlagsAreConsistent(HeapObject object) {
      82           0 :   return Heap::PageFlagsAreConsistent(object);
      83             : }
      84             : 
      85   110646219 : void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
      86             :                                   HeapObject value) {
      87             :   Heap::GenerationalBarrierSlow(object, slot, value);
      88   110646154 : }
      89             : 
      90   124707252 : void Heap_MarkingBarrierSlow(HeapObject object, Address slot,
      91             :                              HeapObject value) {
      92             :   Heap::MarkingBarrierSlow(object, slot, value);
      93   124707169 : }
      94             : 
      95          34 : void Heap_WriteBarrierForCodeSlow(Code host) {
      96          34 :   Heap::WriteBarrierForCodeSlow(host);
      97          34 : }
      98             : 
      99      220056 : void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
     100             :                                          HeapObject object) {
     101      220061 :   Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
     102      220056 : }
     103             : 
     104      271967 : void Heap_MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
     105             :                                     HeapObject object) {
     106             :   Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
     107      271967 : }
     108             : 
     109         963 : void Heap_GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
     110             :                                              int offset, int length) {
     111         998 :   Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
     112         963 : }
     113             : 
     114         459 : void Heap_MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
     115     1863511 :   Heap::MarkingBarrierForElementsSlow(heap, object);
     116         459 : }
     117             : 
     118     8910689 : void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
     119             :                                                HeapObject descriptor_array,
     120             :                                                int number_of_own_descriptors) {
     121             :   Heap::MarkingBarrierForDescriptorArraySlow(heap, host, descriptor_array,
     122     8910689 :                                              number_of_own_descriptors);
     123     8910696 : }
     124             : 
     125          56 : void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
     126             :   DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
     127             :   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
     128          56 : }
     129             : 
     130          56 : void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
     131             :   DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
     132             :   set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
     133          56 : }
     134             : 
     135          56 : void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
     136             :   DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
     137             :   set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
     138          56 : }
     139             : 
     140          56 : void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
     141             :   DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
     142             :   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
     143          56 : }
     144             : 
     145         246 : void Heap::SetSerializedObjects(FixedArray objects) {
     146             :   DCHECK(isolate()->serializer_enabled());
     147             :   set_serialized_objects(objects);
     148         246 : }
     149             : 
     150         196 : void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
     151             :   DCHECK(isolate()->serializer_enabled());
     152             :   set_serialized_global_proxy_sizes(sizes);
     153         196 : }
     154             : 
     155           0 : bool Heap::GCCallbackTuple::operator==(
     156             :     const Heap::GCCallbackTuple& other) const {
     157           0 :   return other.callback == callback && other.data == data;
     158             : }
     159             : 
     160             : Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
     161             :     const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
     162             : 
     163             : struct Heap::StrongRootsList {
     164             :   FullObjectSlot start;
     165             :   FullObjectSlot end;
     166             :   StrongRootsList* next;
     167             : };
     168             : 
     169      124848 : class IdleScavengeObserver : public AllocationObserver {
     170             :  public:
     171             :   IdleScavengeObserver(Heap& heap, intptr_t step_size)
     172       62442 :       : AllocationObserver(step_size), heap_(heap) {}
     173             : 
     174       32408 :   void Step(int bytes_allocated, Address, size_t) override {
     175       32408 :     heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
     176       32408 :   }
     177             : 
     178             :  private:
     179             :   Heap& heap_;
     180             : };
     181             : 
     182       62440 : Heap::Heap()
     183             :     : isolate_(isolate()),
     184             :       initial_max_old_generation_size_(max_old_generation_size_),
     185             :       initial_max_old_generation_size_threshold_(0),
     186             :       initial_old_generation_size_(max_old_generation_size_ /
     187             :                                    kInitalOldGenerationLimitFactor),
     188             :       memory_pressure_level_(MemoryPressureLevel::kNone),
     189             :       old_generation_allocation_limit_(initial_old_generation_size_),
     190             :       global_pretenuring_feedback_(kInitialFeedbackCapacity),
     191             :       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
     192             :       is_current_gc_forced_(false),
     193      624404 :       external_string_table_(this) {
     194             :   // Ensure old_generation_size_ is a multiple of kPageSize.
     195             :   DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
     196             : 
     197             :   set_native_contexts_list(Smi::kZero);
     198             :   set_allocation_sites_list(Smi::kZero);
      199             :   // Put a dummy entry in the remembered pages so we can find the list in
      200             :   // the minidump even if there are no real unmapped pages.
     201             :   RememberUnmappedPage(kNullAddress, false);
     202       62441 : }
     203             : 
     204             : Heap::~Heap() = default;
     205             : 
     206        1261 : size_t Heap::MaxReserved() {
     207     2736315 :   const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
     208     2736315 :   return static_cast<size_t>(2 * max_semi_space_size_ +
     209             :                              kMaxNewLargeObjectSpaceSize +
     210     2736315 :                              max_old_generation_size_);
     211             : }
     212             : 
     213       30112 : size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
     214             :   const size_t old_space_physical_memory_factor = 4;
     215       30112 :   size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
     216             :                                              old_space_physical_memory_factor *
     217       30112 :                                              kPointerMultiplier);
     218             :   return Max(Min(computed_size, HeapController::kMaxSize),
     219       30112 :              HeapController::kMinSize);
     220             : }
     221             : 
     222          59 : size_t Heap::Capacity() {
     223          59 :   if (!HasBeenSetUp()) return 0;
     224             : 
     225          59 :   return new_space_->Capacity() + OldGenerationCapacity();
     226             : }
     227             : 
     228     2841724 : size_t Heap::OldGenerationCapacity() {
     229     2841724 :   if (!HasBeenSetUp()) return 0;
     230             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
     231             :   size_t total = 0;
     232    13272836 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     233             :        space = spaces.next()) {
     234    10618267 :     total += space->Capacity();
     235             :   }
     236     2654570 :   return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
     237             : }
     238             : 
     239      678087 : size_t Heap::CommittedOldGenerationMemory() {
     240      678087 :   if (!HasBeenSetUp()) return 0;
     241             : 
     242             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
     243             :   size_t total = 0;
     244     3390432 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     245             :        space = spaces.next()) {
     246     2712346 :     total += space->CommittedMemory();
     247             :   }
     248      678087 :   return total + lo_space_->Size() + code_lo_space_->Size();
     249             : }
     250             : 
     251           0 : size_t Heap::CommittedMemoryOfUnmapper() {
     252           0 :   if (!HasBeenSetUp()) return 0;
     253             : 
     254           0 :   return memory_allocator()->unmapper()->CommittedBufferedMemory();
     255             : }
     256             : 
     257      540371 : size_t Heap::CommittedMemory() {
     258      540371 :   if (!HasBeenSetUp()) return 0;
     259             : 
     260      540374 :   return new_space_->CommittedMemory() + new_lo_space_->Size() +
     261      540374 :          CommittedOldGenerationMemory();
     262             : }
     263             : 
     264             : 
     265         246 : size_t Heap::CommittedPhysicalMemory() {
     266         246 :   if (!HasBeenSetUp()) return 0;
     267             : 
     268             :   size_t total = 0;
     269        4182 :   for (SpaceIterator it(this); it.has_next();) {
     270        1968 :     total += it.next()->CommittedPhysicalMemory();
     271             :   }
     272             : 
     273         246 :   return total;
     274             : }
     275             : 
     276      128799 : size_t Heap::CommittedMemoryExecutable() {
     277      128799 :   if (!HasBeenSetUp()) return 0;
     278             : 
     279      128799 :   return static_cast<size_t>(memory_allocator()->SizeExecutable());
     280             : }
     281             : 
     282             : 
     283           0 : void Heap::UpdateMaximumCommitted() {
     284      252314 :   if (!HasBeenSetUp()) return;
     285             : 
     286      252314 :   const size_t current_committed_memory = CommittedMemory();
     287      252314 :   if (current_committed_memory > maximum_committed_) {
     288      100605 :     maximum_committed_ = current_committed_memory;
     289             :   }
     290             : }
     291             : 
     292         305 : size_t Heap::Available() {
     293         305 :   if (!HasBeenSetUp()) return 0;
     294             : 
     295             :   size_t total = 0;
     296             : 
     297        5185 :   for (SpaceIterator it(this); it.has_next();) {
     298        2440 :     total += it.next()->Available();
     299             :   }
     300             : 
     301         305 :   total += memory_allocator()->Available();
     302         305 :   return total;
     303             : }
     304             : 
     305     2686177 : bool Heap::CanExpandOldGeneration(size_t size) {
     306     2686177 :   if (force_oom_) return false;
     307     2674129 :   if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
      308             :   // The OldGenerationCapacity does not account for compaction spaces used
      309             :   // during evacuation. Ensure that expanding the old generation does not
      310             :   // push the total allocated memory size over the maximum heap size.
     311     5345224 :   return memory_allocator()->Size() + size <= MaxReserved();
     312             : }
     313             : 
     314          15 : bool Heap::HasBeenSetUp() {
     315             :   // We will always have a new space when the heap is set up.
     316     6376943 :   return new_space_ != nullptr;
     317             : }
     318             : 
     319             : 
     320       94944 : GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     321             :                                               const char** reason) {
     322             :   // Is global GC requested?
     323       94944 :   if (space != NEW_SPACE && space != NEW_LO_SPACE) {
     324      136346 :     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
     325       68173 :     *reason = "GC in old space requested";
     326       68173 :     return MARK_COMPACTOR;
     327             :   }
     328             : 
     329       26771 :   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
     330         620 :     *reason = "GC in old space forced by flags";
     331         620 :     return MARK_COMPACTOR;
     332             :   }
     333             : 
     334       26839 :   if (incremental_marking()->NeedsFinalization() &&
     335         688 :       AllocationLimitOvershotByLargeMargin()) {
     336          34 :     *reason = "Incremental marking needs finalization";
     337          34 :     return MARK_COMPACTOR;
     338             :   }
     339             : 
     340             :   // Over-estimate the new space size using capacity to allow some slack.
     341       52234 :   if (!CanExpandOldGeneration(new_space_->TotalCapacity() +
     342       26117 :                               new_lo_space()->Size())) {
     343          19 :     isolate_->counters()
     344             :         ->gc_compactor_caused_by_oldspace_exhaustion()
     345          19 :         ->Increment();
     346          19 :     *reason = "scavenge might not succeed";
     347          19 :     return MARK_COMPACTOR;
     348             :   }
     349             : 
     350             :   // Default
     351       26098 :   *reason = nullptr;
     352       26098 :   return YoungGenerationCollector();
     353             : }
     354             : 
     355           0 : void Heap::SetGCState(HeapState state) {
     356      252315 :   gc_state_ = state;
     357           0 : }
     358             : 
     359          35 : void Heap::PrintShortHeapStatistics() {
     360          35 :   if (!FLAG_trace_gc_verbose) return;
     361           0 :   PrintIsolate(isolate_,
     362             :                "Memory allocator,       used: %6" PRIuS
     363             :                " KB,"
     364             :                " available: %6" PRIuS " KB\n",
     365             :                memory_allocator()->Size() / KB,
     366           0 :                memory_allocator()->Available() / KB);
     367           0 :   PrintIsolate(isolate_,
     368             :                "Read-only space,        used: %6" PRIuS
     369             :                " KB"
     370             :                ", available: %6" PRIuS
     371             :                " KB"
     372             :                ", committed: %6" PRIuS " KB\n",
     373           0 :                read_only_space_->Size() / KB,
     374           0 :                read_only_space_->Available() / KB,
     375           0 :                read_only_space_->CommittedMemory() / KB);
     376           0 :   PrintIsolate(isolate_,
     377             :                "New space,              used: %6" PRIuS
     378             :                " KB"
     379             :                ", available: %6" PRIuS
     380             :                " KB"
     381             :                ", committed: %6" PRIuS " KB\n",
     382           0 :                new_space_->Size() / KB, new_space_->Available() / KB,
     383           0 :                new_space_->CommittedMemory() / KB);
     384           0 :   PrintIsolate(isolate_,
     385             :                "New large object space, used: %6" PRIuS
     386             :                " KB"
     387             :                ", available: %6" PRIuS
     388             :                " KB"
     389             :                ", committed: %6" PRIuS " KB\n",
     390           0 :                new_lo_space_->SizeOfObjects() / KB,
     391           0 :                new_lo_space_->Available() / KB,
     392           0 :                new_lo_space_->CommittedMemory() / KB);
     393           0 :   PrintIsolate(isolate_,
     394             :                "Old space,              used: %6" PRIuS
     395             :                " KB"
     396             :                ", available: %6" PRIuS
     397             :                " KB"
     398             :                ", committed: %6" PRIuS " KB\n",
     399           0 :                old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
     400           0 :                old_space_->CommittedMemory() / KB);
     401           0 :   PrintIsolate(isolate_,
     402             :                "Code space,             used: %6" PRIuS
     403             :                " KB"
     404             :                ", available: %6" PRIuS
     405             :                " KB"
      406             :                ", committed: %6" PRIuS " KB\n",
     407           0 :                code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
     408           0 :                code_space_->CommittedMemory() / KB);
     409           0 :   PrintIsolate(isolate_,
     410             :                "Map space,              used: %6" PRIuS
     411             :                " KB"
     412             :                ", available: %6" PRIuS
     413             :                " KB"
     414             :                ", committed: %6" PRIuS " KB\n",
     415           0 :                map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
     416           0 :                map_space_->CommittedMemory() / KB);
     417           0 :   PrintIsolate(isolate_,
     418             :                "Large object space,     used: %6" PRIuS
     419             :                " KB"
     420             :                ", available: %6" PRIuS
     421             :                " KB"
     422             :                ", committed: %6" PRIuS " KB\n",
     423           0 :                lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
     424           0 :                lo_space_->CommittedMemory() / KB);
     425           0 :   PrintIsolate(isolate_,
     426             :                "Code large object space,     used: %6" PRIuS
     427             :                " KB"
     428             :                ", available: %6" PRIuS
     429             :                " KB"
     430             :                ", committed: %6" PRIuS " KB\n",
     431           0 :                code_lo_space_->SizeOfObjects() / KB,
     432           0 :                code_lo_space_->Available() / KB,
     433           0 :                code_lo_space_->CommittedMemory() / KB);
     434           0 :   PrintIsolate(isolate_,
     435             :                "All spaces,             used: %6" PRIuS
     436             :                " KB"
     437             :                ", available: %6" PRIuS
     438             :                " KB"
      439             :                ", committed: %6" PRIuS " KB\n",
     440           0 :                this->SizeOfObjects() / KB, this->Available() / KB,
     441           0 :                this->CommittedMemory() / KB);
     442           0 :   PrintIsolate(isolate_,
     443             :                "Unmapper buffering %zu chunks of committed: %6" PRIuS " KB\n",
     444             :                memory_allocator()->unmapper()->NumberOfCommittedChunks(),
     445           0 :                CommittedMemoryOfUnmapper() / KB);
     446           0 :   PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
     447           0 :                isolate()->isolate_data()->external_memory_ / KB);
     448           0 :   PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
     449           0 :                backing_store_bytes_ / KB);
     450           0 :   PrintIsolate(isolate_, "External memory global %zu KB\n",
     451           0 :                external_memory_callback_() / KB);
     452           0 :   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
     453           0 :                total_gc_time_ms_);
     454             : }
     455             : 
     456           0 : void Heap::ReportStatisticsAfterGC() {
     457           0 :   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
     458             :        ++i) {
     459           0 :     int count = deferred_counters_[i];
     460           0 :     deferred_counters_[i] = 0;
     461           0 :     while (count > 0) {
     462           0 :       count--;
     463           0 :       isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
     464             :     }
     465             :   }
     466           0 : }
     467             : 
     468        8192 : void Heap::AddHeapObjectAllocationTracker(
     469             :     HeapObjectAllocationTracker* tracker) {
     470        8192 :   if (allocation_trackers_.empty()) DisableInlineAllocation();
     471        8192 :   allocation_trackers_.push_back(tracker);
     472        8192 : }
     473             : 
     474        8188 : void Heap::RemoveHeapObjectAllocationTracker(
     475             :     HeapObjectAllocationTracker* tracker) {
     476             :   allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
     477             :                                          allocation_trackers_.end(), tracker),
     478        8188 :                              allocation_trackers_.end());
     479        8188 :   if (allocation_trackers_.empty()) EnableInlineAllocation();
     480        8188 : }
     481             : 
     482           0 : void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
     483             :                                   RetainingPathOption option) {
     484           0 :   if (!FLAG_track_retaining_path) {
     485           0 :     PrintF("Retaining path tracking requires --track-retaining-path\n");
     486             :   } else {
     487             :     Handle<WeakArrayList> array(retaining_path_targets(), isolate());
     488           0 :     int index = array->length();
     489             :     array = WeakArrayList::AddToEnd(isolate(), array,
     490           0 :                                     MaybeObjectHandle::Weak(object));
     491             :     set_retaining_path_targets(*array);
     492             :     DCHECK_EQ(array->length(), index + 1);
     493           0 :     retaining_path_target_option_[index] = option;
     494             :   }
     495           0 : }
     496             : 
     497           0 : bool Heap::IsRetainingPathTarget(HeapObject object,
     498             :                                  RetainingPathOption* option) {
     499             :   WeakArrayList targets = retaining_path_targets();
     500             :   int length = targets->length();
     501             :   MaybeObject object_to_check = HeapObjectReference::Weak(object);
     502           0 :   for (int i = 0; i < length; i++) {
     503             :     MaybeObject target = targets->Get(i);
     504             :     DCHECK(target->IsWeakOrCleared());
     505           0 :     if (target == object_to_check) {
     506             :       DCHECK(retaining_path_target_option_.count(i));
     507           0 :       *option = retaining_path_target_option_[i];
     508             :       return true;
     509             :     }
     510             :   }
     511           0 :   return false;
     512             : }
     513             : 
     514           0 : void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
     515           0 :   PrintF("\n\n\n");
     516           0 :   PrintF("#################################################\n");
     517           0 :   PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target->ptr()));
     518           0 :   HeapObject object = target;
     519             :   std::vector<std::pair<HeapObject, bool>> retaining_path;
     520             :   Root root = Root::kUnknown;
     521             :   bool ephemeron = false;
     522             :   while (true) {
     523           0 :     retaining_path.push_back(std::make_pair(object, ephemeron));
     524           0 :     if (option == RetainingPathOption::kTrackEphemeronPath &&
     525             :         ephemeron_retainer_.count(object)) {
     526           0 :       object = ephemeron_retainer_[object];
     527             :       ephemeron = true;
     528           0 :     } else if (retainer_.count(object)) {
     529           0 :       object = retainer_[object];
     530             :       ephemeron = false;
     531             :     } else {
     532           0 :       if (retaining_root_.count(object)) {
     533           0 :         root = retaining_root_[object];
     534             :       }
     535             :       break;
     536             :     }
     537             :   }
     538           0 :   int distance = static_cast<int>(retaining_path.size());
     539           0 :   for (auto node : retaining_path) {
     540           0 :     HeapObject object = node.first;
     541           0 :     bool ephemeron = node.second;
     542           0 :     PrintF("\n");
     543           0 :     PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
     544           0 :     PrintF("Distance from root %d%s: ", distance,
     545           0 :            ephemeron ? " (ephemeron)" : "");
     546           0 :     object->ShortPrint();
     547           0 :     PrintF("\n");
     548             : #ifdef OBJECT_PRINT
     549             :     object->Print();
     550             :     PrintF("\n");
     551             : #endif
     552           0 :     --distance;
     553             :   }
     554           0 :   PrintF("\n");
     555           0 :   PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
     556           0 :   PrintF("Root: %s\n", RootVisitor::RootName(root));
     557           0 :   PrintF("-------------------------------------------------\n");
     558           0 : }
     559             : 
     560           0 : void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
     561           0 :   if (retainer_.count(object)) return;
     562           0 :   retainer_[object] = retainer;
     563           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     564           0 :   if (IsRetainingPathTarget(object, &option)) {
     565             :     // Check if the retaining path was already printed in
     566             :     // AddEphemeronRetainer().
     567           0 :     if (ephemeron_retainer_.count(object) == 0 ||
     568           0 :         option == RetainingPathOption::kDefault) {
     569           0 :       PrintRetainingPath(object, option);
     570             :     }
     571             :   }
     572             : }
     573             : 
     574           0 : void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
     575           0 :   if (ephemeron_retainer_.count(object)) return;
     576           0 :   ephemeron_retainer_[object] = retainer;
     577           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     578           0 :   if (IsRetainingPathTarget(object, &option) &&
     579           0 :       option == RetainingPathOption::kTrackEphemeronPath) {
     580             :     // Check if the retaining path was already printed in AddRetainer().
     581           0 :     if (retainer_.count(object) == 0) {
     582           0 :       PrintRetainingPath(object, option);
     583             :     }
     584             :   }
     585             : }
     586             : 
     587           0 : void Heap::AddRetainingRoot(Root root, HeapObject object) {
     588           0 :   if (retaining_root_.count(object)) return;
     589           0 :   retaining_root_[object] = root;
     590           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     591           0 :   if (IsRetainingPathTarget(object, &option)) {
     592           0 :     PrintRetainingPath(object, option);
     593             :   }
     594             : }
     595             : 
     596           0 : void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
     597           0 :   deferred_counters_[feature]++;
     598           0 : }
     599             : 
     600       17208 : bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
     601             : 
     602       94944 : void Heap::GarbageCollectionPrologue() {
     603      379776 :   TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
     604             :   {
     605             :     AllowHeapAllocation for_the_first_part_of_prologue;
     606       94944 :     gc_count_++;
     607             : 
     608             : #ifdef VERIFY_HEAP
     609             :     if (FLAG_verify_heap) {
     610             :       Verify();
     611             :     }
     612             : #endif
     613             :   }
     614             : 
     615             :   // Reset GC statistics.
     616       94944 :   promoted_objects_size_ = 0;
     617       94944 :   previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
     618       94944 :   semi_space_copied_object_size_ = 0;
     619       94944 :   nodes_died_in_new_space_ = 0;
     620       94944 :   nodes_copied_in_new_space_ = 0;
     621       94944 :   nodes_promoted_ = 0;
     622             : 
     623             :   UpdateMaximumCommitted();
     624             : 
     625             : #ifdef DEBUG
     626             :   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
     627             : 
     628             :   if (FLAG_gc_verbose) Print();
     629             : #endif  // DEBUG
     630             : 
     631       94944 :   if (new_space_->IsAtMaximumCapacity()) {
     632        3715 :     maximum_size_scavenges_++;
     633             :   } else {
     634       91229 :     maximum_size_scavenges_ = 0;
     635             :   }
     636       94944 :   CheckNewSpaceExpansionCriteria();
     637             :   UpdateNewSpaceAllocationCounter();
     638       94944 :   if (FLAG_track_retaining_path) {
     639             :     retainer_.clear();
     640             :     ephemeron_retainer_.clear();
     641             :     retaining_root_.clear();
     642             :   }
     643       94944 :   memory_allocator()->unmapper()->PrepareForGC();
     644       94944 : }
     645             : 
     646      216884 : size_t Heap::SizeOfObjects() {
     647             :   size_t total = 0;
     648             : 
     649     7516789 :   for (SpaceIterator it(this); it.has_next();) {
     650     5139304 :     total += it.next()->SizeOfObjects();
     651             :   }
     652      216884 :   return total;
     653             : }
     654             : 
     655             : // static
     656         196 : const char* Heap::GetSpaceName(AllocationSpace space) {
     657         196 :   switch (space) {
     658             :     case NEW_SPACE:
     659             :       return "new_space";
     660             :     case OLD_SPACE:
     661           6 :       return "old_space";
     662             :     case MAP_SPACE:
     663           7 :       return "map_space";
     664             :     case CODE_SPACE:
     665           5 :       return "code_space";
     666             :     case LO_SPACE:
     667           5 :       return "large_object_space";
     668             :     case NEW_LO_SPACE:
     669           5 :       return "new_large_object_space";
     670             :     case CODE_LO_SPACE:
     671           5 :       return "code_large_object_space";
     672             :     case RO_SPACE:
     673         158 :       return "read_only_space";
     674             :   }
     675           0 :   UNREACHABLE();
     676             : }
     677             : 
     678      127463 : void Heap::MergeAllocationSitePretenuringFeedback(
     679             :     const PretenuringFeedbackMap& local_pretenuring_feedback) {
     680             :   AllocationSite site;
     681      210524 :   for (auto& site_and_count : local_pretenuring_feedback) {
     682       83061 :     site = site_and_count.first;
     683             :     MapWord map_word = site_and_count.first->map_word();
     684       83061 :     if (map_word.IsForwardingAddress()) {
     685             :       site = AllocationSite::cast(map_word.ToForwardingAddress());
     686             :     }
     687             : 
     688             :     // We have not validated the allocation site yet, since we have not
      689             :     // dereferenced the site while collecting information.
     690             :     // This is an inlined check of AllocationMemento::IsValid.
     691      166122 :     if (!site->IsAllocationSite() || site->IsZombie()) continue;
     692             : 
     693       83008 :     const int value = static_cast<int>(site_and_count.second);
     694             :     DCHECK_LT(0, value);
     695       83008 :     if (site->IncrementMementoFoundCount(value)) {
     696             :       // For sites in the global map the count is accessed through the site.
     697        3212 :       global_pretenuring_feedback_.insert(std::make_pair(site, 0));
     698             :     }
     699             :   }
     700      127463 : }
     701             : 
     702       30590 : void Heap::AddAllocationObserversToAllSpaces(
     703             :     AllocationObserver* observer, AllocationObserver* new_space_observer) {
     704             :   DCHECK(observer && new_space_observer);
     705             : 
     706      275310 :   for (SpaceIterator it(this); it.has_next();) {
     707             :     Space* space = it.next();
     708      244720 :     if (space == new_space()) {
     709       30590 :       space->AddAllocationObserver(new_space_observer);
     710             :     } else {
     711      214130 :       space->AddAllocationObserver(observer);
     712             :     }
     713             :   }
     714       30590 : }
     715             : 
     716          59 : void Heap::RemoveAllocationObserversFromAllSpaces(
     717             :     AllocationObserver* observer, AllocationObserver* new_space_observer) {
     718             :   DCHECK(observer && new_space_observer);
     719             : 
     720         531 :   for (SpaceIterator it(this); it.has_next();) {
     721             :     Space* space = it.next();
     722         472 :     if (space == new_space()) {
     723          59 :       space->RemoveAllocationObserver(new_space_observer);
     724             :     } else {
     725         413 :       space->RemoveAllocationObserver(observer);
     726             :     }
     727             :   }
     728          59 : }
     729             : 
     730             : class Heap::SkipStoreBufferScope {
     731             :  public:
     732             :   explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
     733             :       : store_buffer_(store_buffer) {
     734       94944 :     store_buffer_->MoveAllEntriesToRememberedSet();
     735       94944 :     store_buffer_->SetMode(StoreBuffer::IN_GC);
     736             :   }
     737             : 
     738             :   ~SkipStoreBufferScope() {
     739             :     DCHECK(store_buffer_->Empty());
     740       94944 :     store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
     741             :   }
     742             : 
     743             :  private:
     744             :   StoreBuffer* store_buffer_;
     745             : };
     746             : 
     747             : namespace {
     748        1383 : inline bool MakePretenureDecision(
     749             :     AllocationSite site, AllocationSite::PretenureDecision current_decision,
     750             :     double ratio, bool maximum_size_scavenge) {
     751             :   // Here we just allow state transitions from undecided or maybe tenure
     752             :   // to don't tenure, maybe tenure, or tenure.
     753        2766 :   if ((current_decision == AllocationSite::kUndecided ||
     754        1383 :        current_decision == AllocationSite::kMaybeTenure)) {
     755         938 :     if (ratio >= AllocationSite::kPretenureRatio) {
     756             :       // We just transition into tenure state when the semi-space was at
     757             :       // maximum capacity.
     758         787 :       if (maximum_size_scavenge) {
     759             :         site->set_deopt_dependent_code(true);
     760             :         site->set_pretenure_decision(AllocationSite::kTenure);
     761             :         // Currently we just need to deopt when we make a state transition to
     762             :         // tenure.
     763          47 :         return true;
     764             :       }
     765             :       site->set_pretenure_decision(AllocationSite::kMaybeTenure);
     766             :     } else {
     767             :       site->set_pretenure_decision(AllocationSite::kDontTenure);
     768             :     }
     769             :   }
     770             :   return false;
     771             : }
     772             : 
     773        1383 : inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
     774             :                                       bool maximum_size_scavenge) {
     775             :   bool deopt = false;
     776             :   int create_count = site->memento_create_count();
     777             :   int found_count = site->memento_found_count();
     778             :   bool minimum_mementos_created =
     779             :       create_count >= AllocationSite::kPretenureMinimumCreated;
     780           0 :   double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
     781        1383 :                      ? static_cast<double>(found_count) / create_count
     782        2766 :                      : 0.0;
     783             :   AllocationSite::PretenureDecision current_decision =
     784             :       site->pretenure_decision();
     785             : 
     786        1383 :   if (minimum_mementos_created) {
     787        1383 :     deopt = MakePretenureDecision(site, current_decision, ratio,
     788        1383 :                                   maximum_size_scavenge);
     789             :   }
     790             : 
     791        1383 :   if (FLAG_trace_pretenuring_statistics) {
     792           0 :     PrintIsolate(isolate,
     793             :                  "pretenuring: AllocationSite(%p): (created, found, ratio) "
     794             :                  "(%d, %d, %f) %s => %s\n",
     795             :                  reinterpret_cast<void*>(site.ptr()), create_count, found_count,
     796             :                  ratio, site->PretenureDecisionName(current_decision),
     797           0 :                  site->PretenureDecisionName(site->pretenure_decision()));
     798             :   }
     799             : 
     800             :   // Clear feedback calculation fields until the next gc.
     801             :   site->set_memento_found_count(0);
     802             :   site->set_memento_create_count(0);
     803        1383 :   return deopt;
     804             : }
     805             : }  // namespace
     806             : 
     807           0 : void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
     808             :   global_pretenuring_feedback_.erase(site);
     809           0 : }
     810             : 
     811           0 : bool Heap::DeoptMaybeTenuredAllocationSites() {
     812       94944 :   return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
     813             : }
     814             : 
     815       94944 : void Heap::ProcessPretenuringFeedback() {
     816       94944 :   bool trigger_deoptimization = false;
     817       94944 :   if (FLAG_allocation_site_pretenuring) {
     818             :     int tenure_decisions = 0;
     819             :     int dont_tenure_decisions = 0;
     820             :     int allocation_mementos_found = 0;
     821       94944 :     int allocation_sites = 0;
     822             :     int active_allocation_sites = 0;
     823             : 
     824       94944 :     AllocationSite site;
     825             : 
     826             :     // Step 1: Digest feedback for recorded allocation sites.
     827             :     bool maximum_size_scavenge = MaximumSizeScavenge();
     828       96327 :     for (auto& site_and_count : global_pretenuring_feedback_) {
     829        1383 :       allocation_sites++;
     830        1383 :       site = site_and_count.first;
      831             :       // The count is always accessed through the site.
     832             :       DCHECK_EQ(0, site_and_count.second);
     833             :       int found_count = site->memento_found_count();
     834             :       // An entry in the storage does not imply that the count is > 0 because
     835             :       // allocation sites might have been reset due to too many objects dying
     836             :       // in old space.
     837        1383 :       if (found_count > 0) {
     838             :         DCHECK(site->IsAllocationSite());
     839        1383 :         active_allocation_sites++;
     840        1383 :         allocation_mementos_found += found_count;
     841        1383 :         if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
     842          47 :           trigger_deoptimization = true;
     843             :         }
     844        1383 :         if (site->GetAllocationType() == AllocationType::kOld) {
     845          54 :           tenure_decisions++;
     846             :         } else {
     847        1329 :           dont_tenure_decisions++;
     848             :         }
     849             :       }
     850             :     }
     851             : 
     852             :     // Step 2: Deopt maybe tenured allocation sites if necessary.
     853             :     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
     854       94944 :     if (deopt_maybe_tenured) {
     855         174 :       ForeachAllocationSite(
     856             :           allocation_sites_list(),
     857         600 :           [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
     858             :             DCHECK(site->IsAllocationSite());
     859         586 :             allocation_sites++;
     860         586 :             if (site->IsMaybeTenure()) {
     861             :               site->set_deopt_dependent_code(true);
     862          14 :               trigger_deoptimization = true;
     863             :             }
     864         174 :           });
     865             :     }
     866             : 
     867       94944 :     if (trigger_deoptimization) {
     868          35 :       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
     869             :     }
     870             : 
     871       94944 :     if (FLAG_trace_pretenuring_statistics &&
     872           0 :         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
     873             :          dont_tenure_decisions > 0)) {
     874           0 :       PrintIsolate(isolate(),
     875             :                    "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
     876             :                    "active_sites=%d "
     877             :                    "mementos=%d tenured=%d not_tenured=%d\n",
     878             :                    deopt_maybe_tenured ? 1 : 0, allocation_sites,
     879             :                    active_allocation_sites, allocation_mementos_found,
     880           0 :                    tenure_decisions, dont_tenure_decisions);
     881             :     }
     882             : 
     883             :     global_pretenuring_feedback_.clear();
     884             :     global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
     885             :   }
     886       94944 : }
     887             : 
     888      297326 : void Heap::InvalidateCodeDeoptimizationData(Code code) {
     889             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(code);
     890      297326 :   CodePageMemoryModificationScope modification_scope(chunk);
     891      297326 :   code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
     892      297326 : }
     893             : 
     894          34 : void Heap::DeoptMarkedAllocationSites() {
     895             :   // TODO(hpayer): If iterating over the allocation sites list becomes a
     896             :   // performance issue, use a cache data structure in heap instead.
     897             : 
     898         218 :   ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) {
     899         124 :     if (site->deopt_dependent_code()) {
     900         180 :       site->dependent_code()->MarkCodeForDeoptimization(
     901          60 :           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
     902             :       site->set_deopt_dependent_code(false);
     903             :     }
     904         158 :   });
     905             : 
     906          34 :   Deoptimizer::DeoptimizeMarkedCode(isolate_);
     907          34 : }
     908             : 
     909             : 
     910       94944 : void Heap::GarbageCollectionEpilogue() {
     911      379776 :   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
     912       94944 :   if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
     913           0 :     ZapFromSpace();
     914             :   }
     915             : 
     916             : #ifdef VERIFY_HEAP
     917             :   if (FLAG_verify_heap) {
     918             :     Verify();
     919             :   }
     920             : #endif
     921             : 
     922             :   AllowHeapAllocation for_the_rest_of_the_epilogue;
     923             : 
     924             : #ifdef DEBUG
     925             :   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
     926             :   if (FLAG_print_handles) PrintHandles();
     927             :   if (FLAG_gc_verbose) Print();
     928             :   if (FLAG_code_stats) ReportCodeStatistics("After GC");
     929             :   if (FLAG_check_handle_count) CheckHandleCount();
     930             : #endif
     931             : 
     932             :   UpdateMaximumCommitted();
     933             : 
     934       94944 :   isolate_->counters()->alive_after_last_gc()->Set(
     935             :       static_cast<int>(SizeOfObjects()));
     936             : 
     937       94944 :   isolate_->counters()->string_table_capacity()->Set(
     938             :       string_table()->Capacity());
     939       94944 :   isolate_->counters()->number_of_symbols()->Set(
     940             :       string_table()->NumberOfElements());
     941             : 
     942       94944 :   if (CommittedMemory() > 0) {
     943       94944 :     isolate_->counters()->external_fragmentation_total()->AddSample(
     944      189888 :         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
     945             : 
     946       94944 :     isolate_->counters()->heap_sample_total_committed()->AddSample(
     947      189888 :         static_cast<int>(CommittedMemory() / KB));
     948       94944 :     isolate_->counters()->heap_sample_total_used()->AddSample(
     949      189888 :         static_cast<int>(SizeOfObjects() / KB));
     950       94944 :     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
     951      189888 :         static_cast<int>(map_space()->CommittedMemory() / KB));
     952       94944 :     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
     953      189888 :         static_cast<int>(code_space()->CommittedMemory() / KB));
     954             : 
     955       94944 :     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
     956      189888 :         static_cast<int>(MaximumCommittedMemory() / KB));
     957             :   }
     958             : 
     959             : #define UPDATE_COUNTERS_FOR_SPACE(space)                \
     960             :   isolate_->counters()->space##_bytes_available()->Set( \
     961             :       static_cast<int>(space()->Available()));          \
     962             :   isolate_->counters()->space##_bytes_committed()->Set( \
     963             :       static_cast<int>(space()->CommittedMemory()));    \
     964             :   isolate_->counters()->space##_bytes_used()->Set(      \
     965             :       static_cast<int>(space()->SizeOfObjects()));
     966             : #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
     967             :   if (space()->CommittedMemory() > 0) {                                \
     968             :     isolate_->counters()->external_fragmentation_##space()->AddSample( \
     969             :         static_cast<int>(100 -                                         \
     970             :                          (space()->SizeOfObjects() * 100.0) /          \
     971             :                              space()->CommittedMemory()));             \
     972             :   }
     973             : #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
     974             :   UPDATE_COUNTERS_FOR_SPACE(space)                         \
     975             :   UPDATE_FRAGMENTATION_FOR_SPACE(space)
     976             : 
     977      284832 :   UPDATE_COUNTERS_FOR_SPACE(new_space)
     978      474720 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
     979      474720 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
     980      474720 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
     981      399320 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
     982             : #undef UPDATE_COUNTERS_FOR_SPACE
     983             : #undef UPDATE_FRAGMENTATION_FOR_SPACE
     984             : #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
     985             : 
     986             : #ifdef DEBUG
     987             :   ReportStatisticsAfterGC();
     988             : #endif  // DEBUG
     989             : 
     990       94944 :   last_gc_time_ = MonotonicallyIncreasingTimeInMs();
     991             : 
     992             :   {
     993      379776 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
     994       94944 :     ReduceNewSpaceSize();
     995             :   }
     996             : 
     997       94944 :   if (FLAG_harmony_weak_refs) {
     998             :     // TODO(marja): (spec): The exact condition on when to schedule the cleanup
     999             :     // task is unclear. This version schedules the cleanup task for a
    1000             :     // JSFinalizationGroup whenever the GC has discovered new dirty WeakCells
    1001             :     // for it (at that point it might have leftover dirty WeakCells since an
    1002             :     // earlier invocation of the cleanup function didn't iterate through
    1003             :     // them). See https://github.com/tc39/proposal-weakrefs/issues/34
    1004             :     HandleScope handle_scope(isolate());
    1005         821 :     while (!isolate()->heap()->dirty_js_finalization_groups()->IsUndefined(
    1006             :         isolate())) {
    1007             :       // Enqueue one microtask per JSFinalizationGroup.
    1008             :       Handle<JSFinalizationGroup> finalization_group(
    1009             :           JSFinalizationGroup::cast(
    1010             :               isolate()->heap()->dirty_js_finalization_groups()),
    1011             :           isolate());
    1012             :       isolate()->heap()->set_dirty_js_finalization_groups(
    1013             :           finalization_group->next());
    1014         416 :       finalization_group->set_next(ReadOnlyRoots(isolate()).undefined_value());
    1015             :       Handle<NativeContext> context(finalization_group->native_context(),
    1016             :                                     isolate());
    1017             :       // GC has no native context, but we use the creation context of the
     1018             :       // JSFinalizationGroup for the EnqueueTask operation. This is consistent
    1019             :       // with the Promise implementation, assuming the JSFinalizationGroup's
    1020             :       // creation context is the "caller's context" in promise functions. An
    1021             :       // alternative would be to use the native context of the cleanup
    1022             :       // function. This difference shouldn't be observable from JavaScript,
    1023             :       // since we enter the native context of the cleanup function before
    1024             :       // calling it. TODO(marja): Revisit when the spec clarifies this. See also
    1025             :       // https://github.com/tc39/proposal-weakrefs/issues/38 .
    1026             :       Handle<FinalizationGroupCleanupJobTask> task =
    1027             :           isolate()->factory()->NewFinalizationGroupCleanupJobTask(
    1028         208 :               finalization_group);
    1029         416 :       context->microtask_queue()->EnqueueMicrotask(*task);
    1030             :     }
    1031             :   }
    1032       94944 : }
    1033             : 
    1034             : class GCCallbacksScope {
    1035             :  public:
    1036             :   explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    1037      229500 :     heap_->gc_callbacks_depth_++;
    1038             :   }
    1039      229500 :   ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
    1040             : 
    1041       94944 :   bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
    1042             : 
    1043             :  private:
    1044             :   Heap* heap_;
    1045             : };
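
The depth counter maintained by GCCallbacksScope is what lets PerformGarbageCollection skip the embedder prologue/epilogue callbacks when a callback itself ends up triggering a nested collection: only the outermost scope observes a depth of 1. The following standalone sketch illustrates the same guard pattern; CallbackHost, DepthScope, and RunWithCallbacks are illustrative names, not part of V8.

    #include <iostream>

    // Illustrative re-entrancy guard: callbacks fire only at the outermost
    // nesting level, mirroring GCCallbacksScope::CheckReenter().
    class CallbackHost {
     public:
      void RunWithCallbacks() {
        DepthScope scope(this);
        if (scope.IsOutermost()) std::cout << "prologue callback\n";
        // A callback could re-enter RunWithCallbacks(); the nested call sees
        // depth == 2 and skips the callbacks, preventing unbounded recursion.
        if (scope.IsOutermost()) std::cout << "epilogue callback\n";
      }

     private:
      class DepthScope {
       public:
        explicit DepthScope(CallbackHost* host) : host_(host) { ++host_->depth_; }
        ~DepthScope() { --host_->depth_; }
        bool IsOutermost() const { return host_->depth_ == 1; }

       private:
        CallbackHost* host_;
      };

      int depth_ = 0;
    };

    int main() {
      CallbackHost host;
      host.RunWithCallbacks();
      return 0;
    }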
    1046             : 
    1047             : 
    1048       18387 : void Heap::HandleGCRequest() {
    1049       18387 :   if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
    1050             :     CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
    1051           0 :     stress_scavenge_observer_->RequestedGCDone();
    1052       18387 :   } else if (HighMemoryPressure()) {
    1053             :     incremental_marking()->reset_request_type();
    1054           5 :     CheckMemoryPressure();
    1055       18382 :   } else if (incremental_marking()->request_type() ==
    1056             :              IncrementalMarking::COMPLETE_MARKING) {
    1057             :     incremental_marking()->reset_request_type();
    1058        6808 :     CollectAllGarbage(current_gc_flags_,
    1059             :                       GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
    1060             :                       current_gc_callback_flags_);
    1061       11574 :   } else if (incremental_marking()->request_type() ==
    1062       11574 :                  IncrementalMarking::FINALIZATION &&
    1063       23148 :              incremental_marking()->IsMarking() &&
    1064             :              !incremental_marking()->finalize_marking_completed()) {
    1065             :     incremental_marking()->reset_request_type();
    1066             :     FinalizeIncrementalMarkingIncrementally(
    1067       11574 :         GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
    1068             :   }
    1069       18387 : }
    1070             : 
    1071             : 
    1072           0 : void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
    1073             :   DCHECK(FLAG_idle_time_scavenge);
    1074             :   DCHECK_NOT_NULL(scavenge_job_);
    1075       32408 :   scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
    1076           0 : }
    1077             : 
    1078       94944 : TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
    1079       94944 :   if (IsYoungGenerationCollector(collector)) {
    1080       26098 :     if (isolate_->IsIsolateInBackground()) {
    1081           0 :       return isolate_->counters()->gc_scavenger_background();
    1082             :     }
    1083       26098 :     return isolate_->counters()->gc_scavenger_foreground();
    1084             :   } else {
    1085       68846 :     if (!incremental_marking()->IsStopped()) {
    1086       25209 :       if (ShouldReduceMemory()) {
    1087        1527 :         if (isolate_->IsIsolateInBackground()) {
    1088           0 :           return isolate_->counters()->gc_finalize_reduce_memory_background();
    1089             :         }
    1090        1527 :         return isolate_->counters()->gc_finalize_reduce_memory_foreground();
    1091             :       } else {
    1092       23682 :         if (isolate_->IsIsolateInBackground()) {
    1093           0 :           return isolate_->counters()->gc_finalize_background();
    1094             :         }
    1095       23682 :         return isolate_->counters()->gc_finalize_foreground();
    1096             :       }
    1097             :     } else {
    1098       43637 :       if (isolate_->IsIsolateInBackground()) {
    1099           0 :         return isolate_->counters()->gc_compactor_background();
    1100             :       }
    1101       43637 :       return isolate_->counters()->gc_compactor_foreground();
    1102             :     }
    1103             :   }
    1104             : }
    1105             : 
    1106           0 : TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
    1107       94944 :   if (IsYoungGenerationCollector(collector)) {
    1108       26098 :     return isolate_->counters()->gc_scavenger();
    1109             :   } else {
    1110       68846 :     if (!incremental_marking()->IsStopped()) {
    1111       25209 :       if (ShouldReduceMemory()) {
    1112        1527 :         return isolate_->counters()->gc_finalize_reduce_memory();
    1113             :       } else {
    1114       23682 :         return isolate_->counters()->gc_finalize();
    1115             :       }
    1116             :     } else {
    1117       43637 :       return isolate_->counters()->gc_compactor();
    1118             :     }
    1119             :   }
    1120             : }
    1121             : 
    1122        4126 : void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
    1123             :                              const v8::GCCallbackFlags gc_callback_flags) {
    1124             :   // Since we are ignoring the return value, the exact choice of space does
    1125             :   // not matter, so long as we do not specify NEW_SPACE, which would not
    1126             :   // cause a full GC.
    1127             :   set_current_gc_flags(flags);
    1128       64755 :   CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
    1129             :   set_current_gc_flags(kNoGCFlags);
    1130        4126 : }
    1131             : 
    1132             : namespace {
    1133             : 
    1134             : intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
    1135           0 :   int slots = size / kTaggedSize;
    1136             :   DCHECK_EQ(a->Size(), size);
    1137             :   DCHECK_EQ(b->Size(), size);
    1138           0 :   Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a->address());
    1139           0 :   Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b->address());
    1140           0 :   for (int i = 0; i < slots; i++) {
    1141           0 :     if (*slot_a != *slot_b) {
    1142           0 :       return *slot_a - *slot_b;
    1143             :     }
    1144           0 :     slot_a++;
    1145           0 :     slot_b++;
    1146             :   }
    1147             :   return 0;
    1148             : }
    1149             : 
    1150           0 : void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
    1151           0 :   if (objects.size() == 0) return;
    1152             : 
    1153           0 :   sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
    1154             :     intptr_t c = CompareWords(size, a, b);
    1155           0 :     if (c != 0) return c < 0;
    1156             :     return a < b;
    1157             :   });
    1158             : 
    1159             :   std::vector<std::pair<int, HeapObject>> duplicates;
    1160           0 :   HeapObject current = objects[0];
    1161             :   int count = 1;
    1162           0 :   for (size_t i = 1; i < objects.size(); i++) {
    1163           0 :     if (CompareWords(size, current, objects[i]) == 0) {
    1164           0 :       count++;
    1165             :     } else {
    1166           0 :       if (count > 1) {
    1167           0 :         duplicates.push_back(std::make_pair(count - 1, current));
    1168             :       }
    1169             :       count = 1;
    1170           0 :       current = objects[i];
    1171             :     }
    1172             :   }
    1173           0 :   if (count > 1) {
    1174           0 :     duplicates.push_back(std::make_pair(count - 1, current));
    1175             :   }
    1176             : 
    1177           0 :   int threshold = FLAG_trace_duplicate_threshold_kb * KB;
    1178             : 
    1179             :   sort(duplicates.begin(), duplicates.end());
    1180           0 :   for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
    1181           0 :     int duplicate_bytes = it->first * size;
    1182           0 :     if (duplicate_bytes < threshold) break;
    1183           0 :     PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
    1184           0 :            duplicate_bytes / KB);
    1185           0 :     PrintF("Sample object: ");
    1186           0 :     it->second->Print();
    1187           0 :     PrintF("============================\n");
    1188             :   }
    1189             : }
    1190             : }  // anonymous namespace
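
ReportDuplicates uses a standard grouping trick: within one size class it sorts the objects with an order that places byte-identical objects next to each other, then makes a single pass counting the length of each run and recording count - 1 extra copies. A self-contained sketch of that counting pass over plain strings (CountDuplicates is a hypothetical helper, not V8 code):

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    // Sort, then count consecutive equal elements; report (extra copies, value)
    // pairs, mirroring the run-counting loop in ReportDuplicates above.
    std::vector<std::pair<int, std::string>> CountDuplicates(
        std::vector<std::string> buffers) {
      std::vector<std::pair<int, std::string>> duplicates;
      if (buffers.empty()) return duplicates;
      std::sort(buffers.begin(), buffers.end());
      std::string current = buffers[0];
      int count = 1;
      for (size_t i = 1; i < buffers.size(); i++) {
        if (buffers[i] == current) {
          count++;
        } else {
          if (count > 1) duplicates.emplace_back(count - 1, current);
          count = 1;
          current = buffers[i];
        }
      }
      if (count > 1) duplicates.emplace_back(count - 1, current);
      return duplicates;
    }

    int main() {
      for (const auto& d : CountDuplicates({"aa", "bb", "aa", "aa", "cc", "bb"}))
        std::printf("%d duplicate(s) of \"%s\"\n", d.first, d.second.c_str());
      return 0;  // prints: 2 duplicate(s) of "aa" / 1 duplicate(s) of "bb"
    }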
    1191             : 
    1192        1265 : void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
    1193             :   // Since we are ignoring the return value, the exact choice of space does
    1194             :   // not matter, so long as we do not specify NEW_SPACE, which would not
    1195             :   // cause a full GC.
    1196             :   // Major GC would invoke weak handle callbacks on weakly reachable
    1197             :   // handles, but won't collect weakly reachable objects until next
     1198             :   // A major GC would invoke weak handle callbacks on weakly reachable
     1199             :   // handles, but it won't collect weakly reachable objects until the next
     1200             :   // major GC. Therefore, if we collect aggressively and a weak handle
     1201             :   // callback has been invoked, we rerun the major GC to release objects
     1202             :   // which have become garbage.
     1203             :   // Note: as weak callbacks can execute arbitrary code, we cannot
     1204             :   // hope that eventually there will be no weak callback invocations.
     1205             :   // Therefore, we stop recollecting after several attempts.
    1206             :   }
    1207             :   RuntimeCallTimerScope runtime_timer(
    1208        1265 :       isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
    1209             : 
    1210             :   // The optimizing compiler may be unnecessarily holding on to memory.
    1211        1265 :   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    1212        1265 :   isolate()->ClearSerializerData();
    1213             :   set_current_gc_flags(kReduceMemoryFootprintMask);
    1214        1265 :   isolate_->compilation_cache()->Clear();
    1215             :   const int kMaxNumberOfAttempts = 7;
    1216             :   const int kMinNumberOfAttempts = 2;
    1217             :   const v8::GCCallbackFlags callback_flags =
    1218             :       gc_reason == GarbageCollectionReason::kLowMemoryNotification
    1219             :           ? v8::kGCCallbackFlagForced
    1220        1265 :           : v8::kGCCallbackFlagCollectAllAvailableGarbage;
    1221        3883 :   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    1222        2574 :     if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
    1223             :         attempt + 1 >= kMinNumberOfAttempts) {
    1224             :       break;
    1225             :     }
    1226             :   }
    1227             : 
    1228             :   set_current_gc_flags(kNoGCFlags);
    1229        1265 :   new_space_->Shrink();
    1230        2530 :   new_lo_space_->SetCapacity(new_space_->Capacity());
    1231             :   UncommitFromSpace();
    1232        1265 :   EagerlyFreeExternalMemory();
    1233             : 
    1234        1265 :   if (FLAG_trace_duplicate_threshold_kb) {
    1235             :     std::map<int, std::vector<HeapObject>> objects_by_size;
    1236             :     PagedSpaces spaces(this);
    1237           0 :     for (PagedSpace* space = spaces.next(); space != nullptr;
    1238             :          space = spaces.next()) {
    1239           0 :       HeapObjectIterator it(space);
    1240           0 :       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    1241           0 :         objects_by_size[obj->Size()].push_back(obj);
    1242             :       }
    1243             :     }
    1244             :     {
    1245           0 :       LargeObjectIterator it(lo_space());
    1246           0 :       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    1247           0 :         objects_by_size[obj->Size()].push_back(obj);
    1248             :       }
    1249             :     }
    1250           0 :     for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
    1251             :          ++it) {
    1252           0 :       ReportDuplicates(it->first, it->second);
    1253             :     }
    1254             :   }
    1255        1265 : }
    1256             : 
    1257       33987 : void Heap::PreciseCollectAllGarbage(int flags,
    1258             :                                     GarbageCollectionReason gc_reason,
    1259             :                                     const GCCallbackFlags gc_callback_flags) {
    1260       33987 :   if (!incremental_marking()->IsStopped()) {
    1261             :     FinalizeIncrementalMarkingAtomically(gc_reason);
    1262             :   }
    1263             :   CollectAllGarbage(flags, gc_reason, gc_callback_flags);
    1264       33987 : }
    1265             : 
    1266      992161 : void Heap::ReportExternalMemoryPressure() {
    1267             :   const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
    1268             :       static_cast<GCCallbackFlags>(
    1269             :           kGCCallbackFlagSynchronousPhantomCallbackProcessing |
    1270             :           kGCCallbackFlagCollectAllExternalMemory);
    1271     1984322 :   if (isolate()->isolate_data()->external_memory_ >
    1272     1984322 :       (isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
    1273             :        external_memory_hard_limit())) {
    1274             :     CollectAllGarbage(
    1275             :         kReduceMemoryFootprintMask,
    1276             :         GarbageCollectionReason::kExternalMemoryPressure,
    1277             :         static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
    1278             :                                      kGCCallbackFlagsForExternalMemory));
    1279             :     return;
    1280             :   }
    1281      991328 :   if (incremental_marking()->IsStopped()) {
    1282        1292 :     if (incremental_marking()->CanBeActivated()) {
    1283             :       StartIncrementalMarking(GCFlagsForIncrementalMarking(),
    1284             :                               GarbageCollectionReason::kExternalMemoryPressure,
    1285             :                               kGCCallbackFlagsForExternalMemory);
    1286             :     } else {
    1287             :       CollectAllGarbage(i::Heap::kNoGCFlags,
    1288             :                         GarbageCollectionReason::kExternalMemoryPressure,
    1289             :                         kGCCallbackFlagsForExternalMemory);
    1290             :     }
    1291             :   } else {
     1292             :     // Incremental marking is turned on and has already been started.
    1293             :     const double kMinStepSize = 5;
    1294             :     const double kMaxStepSize = 10;
    1295      990036 :     const double ms_step = Min(
    1296             :         kMaxStepSize,
    1297             :         Max(kMinStepSize,
    1298     1980072 :             static_cast<double>(isolate()->isolate_data()->external_memory_) /
    1299      990036 :                 isolate()->isolate_data()->external_memory_limit_ *
    1300             :                 kMinStepSize));
    1301      990036 :     const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
    1302             :     // Extend the gc callback flags with external memory flags.
    1303             :     current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
    1304      990036 :         current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
    1305             :     incremental_marking()->AdvanceWithDeadline(
    1306      990036 :         deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
    1307             :   }
    1308             : }
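
When marking is already running, the deadline for the forced marking step scales with how much external memory has been allocated relative to the limit, clamped between kMinStepSize and kMaxStepSize milliseconds. A small standalone restatement of that clamp, with made-up byte counts for illustration:

    #include <algorithm>
    #include <cstdio>

    // ms_step = clamp(kMin * external / limit, kMin, kMax), as in
    // ReportExternalMemoryPressure above. Inputs are illustrative only.
    double StepSizeMs(double external_memory, double external_memory_limit) {
      const double kMinStepSize = 5;
      const double kMaxStepSize = 10;
      return std::min(kMaxStepSize,
                      std::max(kMinStepSize, external_memory /
                                                 external_memory_limit *
                                                 kMinStepSize));
    }

    int main() {
      std::printf("%.1f\n", StepSizeMs(150e6, 100e6));  // 7.5 ms
      std::printf("%.1f\n", StepSizeMs(400e6, 100e6));  // 10.0 ms (clamped)
      std::printf("%.1f\n", StepSizeMs(50e6, 100e6));   // 5.0 ms (clamped)
      return 0;
    }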
    1309             : 
    1310       94944 : void Heap::EnsureFillerObjectAtTop() {
    1311             :   // There may be an allocation memento behind objects in new space. Upon
    1312             :   // evacuation of a non-full new space (or if we are on the last page) there
    1313             :   // may be uninitialized memory behind top. We fill the remainder of the page
    1314             :   // with a filler.
    1315       94944 :   Address to_top = new_space_->top();
    1316       94944 :   Page* page = Page::FromAddress(to_top - kTaggedSize);
    1317       94944 :   if (page->Contains(to_top)) {
    1318       93541 :     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
    1319       93541 :     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
    1320             :   }
    1321       94944 : }
    1322             : 
    1323       94944 : bool Heap::CollectGarbage(AllocationSpace space,
    1324             :                           GarbageCollectionReason gc_reason,
    1325             :                           const v8::GCCallbackFlags gc_callback_flags) {
    1326       94944 :   const char* collector_reason = nullptr;
    1327       94944 :   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
    1328       94944 :   is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
    1329             : 
    1330       94944 :   if (!CanExpandOldGeneration(new_space()->Capacity() +
    1331       94944 :                               new_lo_space()->Size())) {
    1332          74 :     InvokeNearHeapLimitCallback();
    1333             :   }
    1334             : 
    1335             :   // Ensure that all pending phantom callbacks are invoked.
    1336       94944 :   isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
    1337             : 
    1338             :   // The VM is in the GC state until exiting this function.
    1339             :   VMState<GC> state(isolate());
    1340             : 
    1341             : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
    1342             :   // Reset the allocation timeout, but make sure to allow at least a few
    1343             :   // allocations after a collection. The reason for this is that we have a lot
    1344             :   // of allocation sequences and we assume that a garbage collection will allow
    1345             :   // the subsequent allocation attempts to go through.
    1346             :   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    1347             :     allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
    1348             :   }
    1349             : #endif
    1350             : 
    1351       94944 :   EnsureFillerObjectAtTop();
    1352             : 
    1353      121042 :   if (IsYoungGenerationCollector(collector) &&
    1354             :       !incremental_marking()->IsStopped()) {
    1355        1196 :     if (FLAG_trace_incremental_marking) {
    1356             :       isolate()->PrintWithTimestamp(
    1357           0 :           "[IncrementalMarking] Scavenge during marking.\n");
    1358             :     }
    1359             :   }
    1360             : 
    1361             :   bool next_gc_likely_to_collect_more = false;
    1362             :   size_t committed_memory_before = 0;
    1363             : 
    1364       94944 :   if (collector == MARK_COMPACTOR) {
    1365       68846 :     committed_memory_before = CommittedOldGenerationMemory();
    1366             :   }
    1367             : 
    1368             :   {
    1369      189888 :     tracer()->Start(collector, gc_reason, collector_reason);
    1370             :     DCHECK(AllowHeapAllocation::IsAllowed());
    1371             :     DisallowHeapAllocation no_allocation_during_gc;
    1372       94944 :     GarbageCollectionPrologue();
    1373             : 
    1374             :     {
    1375             :       TimedHistogram* gc_type_timer = GCTypeTimer(collector);
    1376       94944 :       TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
    1377      284832 :       TRACE_EVENT0("v8", gc_type_timer->name());
    1378             : 
    1379       94944 :       TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
    1380             :       OptionalTimedHistogramScopeMode mode =
    1381       94944 :           isolate_->IsMemorySavingsModeActive()
    1382             :               ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
    1383       94944 :               : OptionalTimedHistogramScopeMode::TAKE_TIME;
    1384             :       OptionalTimedHistogramScope histogram_timer_priority_scope(
    1385             :           gc_type_priority_timer, isolate_, mode);
    1386             : 
    1387             :       next_gc_likely_to_collect_more =
    1388       94944 :           PerformGarbageCollection(collector, gc_callback_flags);
    1389       94944 :       if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
    1390       94944 :         tracer()->RecordGCPhasesHistograms(gc_type_timer);
    1391             :       }
    1392             :     }
    1393             : 
    1394             :     // Clear is_current_gc_forced now that the current GC is complete. Do this
    1395             :     // before GarbageCollectionEpilogue() since that could trigger another
    1396             :     // unforced GC.
    1397       94944 :     is_current_gc_forced_ = false;
    1398             : 
    1399       94944 :     GarbageCollectionEpilogue();
    1400       94944 :     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
    1401       68846 :       isolate()->CheckDetachedContextsAfterGC();
    1402             :     }
    1403             : 
    1404       94944 :     if (collector == MARK_COMPACTOR) {
    1405       68846 :       size_t committed_memory_after = CommittedOldGenerationMemory();
    1406       68846 :       size_t used_memory_after = OldGenerationSizeOfObjects();
    1407             :       MemoryReducer::Event event;
    1408       68846 :       event.type = MemoryReducer::kMarkCompact;
    1409       68846 :       event.time_ms = MonotonicallyIncreasingTimeInMs();
    1410             :       // Trigger one more GC if
    1411             :       // - this GC decreased committed memory,
    1412             :       // - there is high fragmentation,
    1413             :       // - there are live detached contexts.
    1414             :       event.next_gc_likely_to_collect_more =
    1415      137335 :           (committed_memory_before > committed_memory_after + MB) ||
    1416      137335 :           HasHighFragmentation(used_memory_after, committed_memory_after) ||
    1417       68846 :           (detached_contexts()->length() > 0);
    1418       68846 :       event.committed_memory = committed_memory_after;
    1419       68846 :       if (deserialization_complete_) {
    1420       68846 :         memory_reducer_->NotifyMarkCompact(event);
    1421             :       }
    1422       68877 :       if (initial_max_old_generation_size_ < max_old_generation_size_ &&
    1423          31 :           used_memory_after < initial_max_old_generation_size_threshold_) {
    1424           4 :         max_old_generation_size_ = initial_max_old_generation_size_;
    1425             :       }
    1426             :     }
    1427             : 
    1428       94944 :     tracer()->Stop(collector);
    1429             :   }
    1430             : 
    1431      163790 :   if (collector == MARK_COMPACTOR &&
    1432       68846 :       (gc_callback_flags & (kGCCallbackFlagForced |
    1433             :                             kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
    1434       26634 :     isolate()->CountUsage(v8::Isolate::kForcedGC);
    1435             :   }
    1436             : 
     1437             :   // Start incremental marking for the next cycle. We do this only for the
     1438             :   // scavenger to avoid a loop where mark-compact causes another mark-compact.
    1439       94944 :   if (IsYoungGenerationCollector(collector)) {
    1440             :     StartIncrementalMarkingIfAllocationLimitIsReached(
    1441             :         GCFlagsForIncrementalMarking(),
    1442       26098 :         kGCCallbackScheduleIdleGarbageCollection);
    1443             :   }
    1444             : 
    1445       94944 :   return next_gc_likely_to_collect_more;
    1446             : }
    1447             : 
    1448             : 
    1449         650 : int Heap::NotifyContextDisposed(bool dependant_context) {
    1450         650 :   if (!dependant_context) {
    1451          10 :     tracer()->ResetSurvivalEvents();
    1452          10 :     old_generation_size_configured_ = false;
    1453          10 :     old_generation_allocation_limit_ = initial_old_generation_size_;
    1454             :     MemoryReducer::Event event;
    1455          10 :     event.type = MemoryReducer::kPossibleGarbage;
    1456          10 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    1457          10 :     memory_reducer_->NotifyPossibleGarbage(event);
    1458             :   }
    1459         650 :   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    1460             : 
    1461         650 :   number_of_disposed_maps_ = retained_maps()->length();
    1462         650 :   tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
    1463         650 :   return ++contexts_disposed_;
    1464             : }
    1465             : 
    1466        1418 : void Heap::StartIncrementalMarking(int gc_flags,
    1467             :                                    GarbageCollectionReason gc_reason,
    1468             :                                    GCCallbackFlags gc_callback_flags) {
    1469             :   DCHECK(incremental_marking()->IsStopped());
    1470             :   set_current_gc_flags(gc_flags);
    1471       30516 :   current_gc_callback_flags_ = gc_callback_flags;
    1472       30516 :   incremental_marking()->Start(gc_reason);
    1473        1418 : }
    1474             : 
    1475     1632704 : void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
    1476             :     int gc_flags, const GCCallbackFlags gc_callback_flags) {
    1477     1632704 :   if (incremental_marking()->IsStopped()) {
    1478     1374501 :     IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
    1479     1374503 :     if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
    1480        2347 :       incremental_marking()->incremental_marking_job()->ScheduleTask(this);
    1481     1372156 :     } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
    1482             :       StartIncrementalMarking(gc_flags,
    1483             :                               GarbageCollectionReason::kAllocationLimit,
    1484             :                               gc_callback_flags);
    1485             :     }
    1486             :   }
    1487     1632706 : }
    1488             : 
    1489          14 : void Heap::StartIdleIncrementalMarking(
    1490             :     GarbageCollectionReason gc_reason,
    1491             :     const GCCallbackFlags gc_callback_flags) {
    1492             :   StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
    1493             :                           gc_callback_flags);
    1494          14 : }
    1495             : 
    1496     4782724 : void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
    1497             :                         WriteBarrierMode mode) {
    1498     4782724 :   if (len == 0) return;
    1499             : 
    1500             :   DCHECK_NE(array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
    1501             :   ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
    1502             :   ObjectSlot src = array->RawFieldOfElementAt(src_index);
    1503     9565443 :   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
    1504     1850699 :     if (dst < src) {
    1505     6321280 :       for (int i = 0; i < len; i++) {
    1506             :         dst.Relaxed_Store(src.Relaxed_Load());
    1507             :         ++dst;
    1508             :         ++src;
    1509             :       }
    1510             :     } else {
    1511             :       // Copy backwards.
    1512         111 :       dst += len - 1;
    1513             :       src += len - 1;
    1514         535 :       for (int i = 0; i < len; i++) {
    1515             :         dst.Relaxed_Store(src.Relaxed_Load());
    1516             :         --dst;
    1517             :         --src;
    1518             :       }
    1519             :     }
    1520             :   } else {
    1521     2932025 :     MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
    1522             :   }
    1523     4782724 :   if (mode == SKIP_WRITE_BARRIER) return;
    1524     4602945 :   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
    1525             : }
    1526             : 
    1527      643500 : void Heap::CopyElements(FixedArray dst_array, FixedArray src_array,
    1528             :                         int dst_index, int src_index, int len,
    1529             :                         WriteBarrierMode mode) {
    1530             :   DCHECK_NE(dst_array, src_array);
    1531      643500 :   if (len == 0) return;
    1532             : 
    1533             :   DCHECK_NE(dst_array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
    1534             :   ObjectSlot dst = dst_array->RawFieldOfElementAt(dst_index);
    1535             :   ObjectSlot src = src_array->RawFieldOfElementAt(src_index);
    1536             :   // Ensure ranges do not overlap.
    1537             :   DCHECK(dst + len <= src || src + len <= dst);
    1538     1287000 :   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
    1539       14876 :     if (dst < src) {
    1540   382699531 :       for (int i = 0; i < len; i++) {
    1541             :         dst.Relaxed_Store(src.Relaxed_Load());
    1542             :         ++dst;
    1543             :         ++src;
    1544             :       }
    1545             :     } else {
    1546             :       // Copy backwards.
    1547       12221 :       dst += len - 1;
    1548             :       src += len - 1;
    1549   446091397 :       for (int i = 0; i < len; i++) {
    1550             :         dst.Relaxed_Store(src.Relaxed_Load());
    1551             :         --dst;
    1552             :         --src;
    1553             :       }
    1554             :     }
    1555             :   } else {
    1556      628624 :     MemCopy(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
    1557             :   }
    1558      643500 :   if (mode == SKIP_WRITE_BARRIER) return;
    1559      616361 :   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, dst_array, dst_index, len);
    1560             : }
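
Both MoveElements and CopyElements fall back to an element-wise loop of Relaxed_Load/Relaxed_Store while concurrent marking is enabled and incremental marking is active, so a concurrent marker never observes a torn slot, and the loop chooses its direction the way memmove does so overlapping ranges stay correct. A minimal sketch of that direction choice using std::atomic slots (illustrative types, not V8's ObjectSlot):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Overlap-safe element-wise move: copy forwards when dst precedes src,
    // backwards otherwise, using relaxed atomics for each slot.
    void MoveSlots(std::atomic<intptr_t>* dst, std::atomic<intptr_t>* src,
                   int len) {
      if (dst < src) {
        for (int i = 0; i < len; i++) {
          dst[i].store(src[i].load(std::memory_order_relaxed),
                       std::memory_order_relaxed);
        }
      } else {
        for (int i = len - 1; i >= 0; i--) {
          dst[i].store(src[i].load(std::memory_order_relaxed),
                       std::memory_order_relaxed);
        }
      }
    }

    int main() {
      std::atomic<intptr_t> slots[5] = {{1}, {2}, {3}, {4}, {5}};
      MoveSlots(&slots[1], &slots[0], 4);  // overlapping shift right by one
      for (auto& s : slots)
        std::printf("%ld ", static_cast<long>(s.load()));
      std::printf("\n");  // 1 1 2 3 4
      return 0;
    }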
    1561             : 
    1562             : #ifdef VERIFY_HEAP
    1563             : // Helper class for verifying the string table.
    1564             : class StringTableVerifier : public ObjectVisitor {
    1565             :  public:
    1566             :   explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
    1567             : 
    1568             :   void VisitPointers(HeapObject host, ObjectSlot start,
    1569             :                      ObjectSlot end) override {
    1570             :     // Visit all HeapObject pointers in [start, end).
    1571             :     for (ObjectSlot p = start; p < end; ++p) {
    1572             :       DCHECK(!HasWeakHeapObjectTag(*p));
    1573             :       if ((*p)->IsHeapObject()) {
    1574             :         HeapObject object = HeapObject::cast(*p);
    1575             :         // Check that the string is actually internalized.
    1576             :         CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
    1577             :               object->IsInternalizedString());
    1578             :       }
    1579             :     }
    1580             :   }
    1581             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    1582             :                      MaybeObjectSlot end) override {
    1583             :     UNREACHABLE();
    1584             :   }
    1585             : 
    1586             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
    1587             : 
    1588             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    1589             :     UNREACHABLE();
    1590             :   }
    1591             : 
    1592             :  private:
    1593             :   Isolate* isolate_;
    1594             : };
    1595             : 
    1596             : static void VerifyStringTable(Isolate* isolate) {
    1597             :   StringTableVerifier verifier(isolate);
    1598             :   isolate->heap()->string_table()->IterateElements(&verifier);
    1599             : }
    1600             : #endif  // VERIFY_HEAP
    1601             : 
    1602      216656 : bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
    1603             :   bool gc_performed = true;
    1604             :   int counter = 0;
    1605             :   static const int kThreshold = 20;
    1606      433317 :   while (gc_performed && counter++ < kThreshold) {
    1607             :     gc_performed = false;
    1608     1299946 :     for (int space = FIRST_SPACE;
    1609     1516607 :          space < SerializerDeserializer::kNumberOfSpaces; space++) {
    1610     1299948 :       Reservation* reservation = &reservations[space];
    1611             :       DCHECK_LE(1, reservation->size());
    1612     1299948 :       if (reservation->at(0).size == 0) {
    1613             :         DCHECK_EQ(1, reservation->size());
    1614             :         continue;
    1615             :       }
    1616             :       bool perform_gc = false;
    1617      433358 :       if (space == MAP_SPACE) {
    1618             :         // We allocate each map individually to avoid fragmentation.
    1619             :         maps->clear();
    1620             :         DCHECK_LE(reservation->size(), 2);
    1621             :         int reserved_size = 0;
    1622      308124 :         for (const Chunk& c : *reservation) reserved_size += c.size;
    1623             :         DCHECK_EQ(0, reserved_size % Map::kSize);
    1624      154062 :         int num_maps = reserved_size / Map::kSize;
    1625    46058410 :         for (int i = 0; i < num_maps; i++) {
    1626             :           // The deserializer will update the skip list.
    1627             :           AllocationResult allocation = map_space()->AllocateRawUnaligned(
    1628    22952170 :               Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
    1629             :           HeapObject free_space;
    1630    22952177 :           if (allocation.To(&free_space)) {
    1631             :             // Mark with a free list node, in case we have a GC before
    1632             :             // deserializing.
    1633    22952177 :             Address free_space_address = free_space->address();
    1634             :             CreateFillerObjectAt(free_space_address, Map::kSize,
    1635    22952177 :                                  ClearRecordedSlots::kNo);
    1636    22952172 :             maps->push_back(free_space_address);
    1637             :           } else {
    1638             :             perform_gc = true;
    1639           0 :             break;
    1640             :           }
    1641             :         }
    1642      279296 :       } else if (space == LO_SPACE) {
    1643             :         // Just check that we can allocate during deserialization.
    1644             :         DCHECK_LE(reservation->size(), 2);
    1645             :         int reserved_size = 0;
    1646          80 :         for (const Chunk& c : *reservation) reserved_size += c.size;
    1647          40 :         perform_gc = !CanExpandOldGeneration(reserved_size);
    1648             :       } else {
    1649     2854347 :         for (auto& chunk : *reservation) {
    1650             :           AllocationResult allocation;
    1651     2575097 :           int size = chunk.size;
    1652             :           DCHECK_LE(static_cast<size_t>(size),
    1653             :                     MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    1654             :                         static_cast<AllocationSpace>(space)));
    1655     2575097 :           if (space == NEW_SPACE) {
    1656             :             allocation = new_space()->AllocateRawUnaligned(size);
    1657             :           } else {
    1658             :             // The deserializer will update the skip list.
    1659             :             allocation = paged_space(space)->AllocateRawUnaligned(
    1660     2574888 :                 size, PagedSpace::IGNORE_SKIP_LIST);
    1661             :           }
    1662             :           HeapObject free_space;
    1663     2575092 :           if (allocation.To(&free_space)) {
    1664             :             // Mark with a free list node, in case we have a GC before
    1665             :             // deserializing.
    1666             :             Address free_space_address = free_space->address();
    1667             :             CreateFillerObjectAt(free_space_address, size,
    1668     2575087 :                                  ClearRecordedSlots::kNo);
    1669             :             DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
    1670             :                       space);
    1671     2575091 :             chunk.start = free_space_address;
    1672     2575091 :             chunk.end = free_space_address + size;
    1673             :           } else {
    1674             :             perform_gc = true;
    1675             :             break;
    1676             :           }
    1677             :         }
    1678             :       }
    1679      433361 :       if (perform_gc) {
     1680             :         // We cannot perform a GC with an uninitialized isolate. This check
    1681             :         // fails for example if the max old space size is chosen unwisely,
    1682             :         // so that we cannot allocate space to deserialize the initial heap.
    1683           5 :         if (!deserialization_complete_) {
    1684             :           V8::FatalProcessOutOfMemory(
    1685           0 :               isolate(), "insufficient memory to create an Isolate");
    1686             :         }
    1687           5 :         if (space == NEW_SPACE) {
    1688           0 :           CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
    1689             :         } else {
    1690           5 :           if (counter > 1) {
    1691             :             CollectAllGarbage(kReduceMemoryFootprintMask,
    1692             :                               GarbageCollectionReason::kDeserializer);
    1693             :           } else {
    1694             :             CollectAllGarbage(kNoGCFlags,
    1695             :                               GarbageCollectionReason::kDeserializer);
    1696             :           }
    1697             :         }
    1698             :         gc_performed = true;
    1699             :         break;  // Abort for-loop over spaces and retry.
    1700             :       }
    1701             :     }
    1702             :   }
    1703             : 
    1704      216656 :   return !gc_performed;
    1705             : }
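
ReserveSpace is a bounded allocate-or-collect loop: it tries to carve out every reservation chunk, and if any space fails to allocate it performs a GC and restarts the whole pass, giving up after kThreshold rounds. The shape of that control flow, restated with placeholder callables (names are hypothetical):

    #include <functional>

    // Bounded "allocate everything, else GC and retry" loop. try_allocate_all
    // returns true once every reservation succeeded; collect_garbage frees
    // memory between attempts. Mirrors the while/counter loop above.
    bool ReserveWithRetries(const std::function<bool()>& try_allocate_all,
                            const std::function<void()>& collect_garbage,
                            int max_attempts = 20) {
      for (int attempt = 0; attempt < max_attempts; attempt++) {
        if (try_allocate_all()) return true;
        collect_garbage();
      }
      return false;  // caller treats this as out of memory
    }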
    1706             : 
    1707             : 
    1708       94944 : void Heap::EnsureFromSpaceIsCommitted() {
    1709      189888 :   if (new_space_->CommitFromSpaceIfNeeded()) return;
    1710             : 
    1711             :   // Committing memory to from space failed.
    1712             :   // Memory is exhausted and we will die.
    1713           0 :   FatalProcessOutOfMemory("Committing semi space failed.");
    1714             : }
    1715             : 
    1716             : 
    1717       94944 : void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
    1718       94944 :   if (start_new_space_size == 0) return;
    1719             : 
    1720       83708 :   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
    1721       83708 :                       static_cast<double>(start_new_space_size) * 100);
    1722             : 
    1723       83708 :   if (previous_semi_space_copied_object_size_ > 0) {
    1724             :     promotion_rate_ =
    1725       55382 :         (static_cast<double>(promoted_objects_size_) /
    1726       55382 :          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
    1727             :   } else {
    1728       28326 :     promotion_rate_ = 0;
    1729             :   }
    1730             : 
    1731             :   semi_space_copied_rate_ =
    1732       83708 :       (static_cast<double>(semi_space_copied_object_size_) /
    1733       83708 :        static_cast<double>(start_new_space_size) * 100);
    1734             : 
    1735       83708 :   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
    1736       83708 :   tracer()->AddSurvivalRatio(survival_rate);
    1737             : }
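
These statistics are plain percentages of the young-generation size at the start of the cycle: promotion_ratio_ is the share of those bytes promoted into old space, semi_space_copied_rate_ is the share copied within new space, and their sum is the survival rate handed to the tracer. A worked example with made-up sizes:

    #include <cstdio>

    // Survival-statistics arithmetic with illustrative values (10 MB of new
    // space at GC start, 2 MB promoted, 3 MB copied within the semi-space).
    int main() {
      const double start_new_space_size = 10.0 * 1024 * 1024;
      const double promoted_objects_size = 2.0 * 1024 * 1024;
      const double semi_space_copied_size = 3.0 * 1024 * 1024;

      const double promotion_ratio =
          promoted_objects_size / start_new_space_size * 100;
      const double copied_rate =
          semi_space_copied_size / start_new_space_size * 100;
      std::printf("promotion %.0f%%, copied %.0f%%, survival %.0f%%\n",
                  promotion_ratio, copied_rate, promotion_ratio + copied_rate);
      // prints: promotion 20%, copied 30%, survival 50%
      return 0;
    }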
    1738             : 
    1739       94944 : bool Heap::PerformGarbageCollection(
    1740             :     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
    1741      189888 :   DisallowJavascriptExecution no_js(isolate());
    1742             : 
    1743             :   size_t freed_global_handles = 0;
    1744             : 
    1745       94944 :   if (!IsYoungGenerationCollector(collector)) {
    1746      137692 :     PROFILE(isolate_, CodeMovingGCEvent());
    1747             :   }
    1748             : 
    1749             : #ifdef VERIFY_HEAP
    1750             :   if (FLAG_verify_heap) {
    1751             :     VerifyStringTable(this->isolate());
    1752             :   }
    1753             : #endif
    1754             : 
    1755             :   GCType gc_type =
    1756       94944 :       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
    1757             : 
    1758             :   {
    1759             :     GCCallbacksScope scope(this);
    1760             :     // Temporarily override any embedder stack state as callbacks may create their
    1761             :     // own state on the stack and recursively trigger GC.
    1762             :     EmbedderStackStateScope embedder_scope(
    1763             :         local_embedder_heap_tracer(),
    1764             :         EmbedderHeapTracer::EmbedderStackState::kUnknown);
    1765       94944 :     if (scope.CheckReenter()) {
    1766             :       AllowHeapAllocation allow_allocation;
    1767      189824 :       AllowJavascriptExecution allow_js(isolate());
    1768      379648 :       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
    1769      189824 :       VMState<EXTERNAL> state(isolate_);
    1770       94912 :       HandleScope handle_scope(isolate_);
    1771       94912 :       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
    1772             :     }
    1773             :   }
    1774             : 
    1775       94944 :   EnsureFromSpaceIsCommitted();
    1776             : 
    1777             :   size_t start_young_generation_size =
    1778       94944 :       Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
    1779             : 
    1780             :   {
    1781             :     Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
    1782             : 
    1783       94944 :     switch (collector) {
    1784             :       case MARK_COMPACTOR:
    1785             :         UpdateOldGenerationAllocationCounter();
    1786             :         // Perform mark-sweep with optional compaction.
    1787       68846 :         MarkCompact();
    1788       68846 :         old_generation_size_configured_ = true;
    1789             :         // This should be updated before PostGarbageCollectionProcessing, which
    1790             :         // can cause another GC. Take into account the objects promoted during
    1791             :         // GC.
    1792             :         old_generation_allocation_counter_at_last_gc_ +=
    1793       68846 :             static_cast<size_t>(promoted_objects_size_);
    1794       68846 :         old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
    1795       68846 :         break;
    1796             :       case MINOR_MARK_COMPACTOR:
    1797           0 :         MinorMarkCompact();
    1798           0 :         break;
    1799             :       case SCAVENGER:
    1800       26098 :         if ((fast_promotion_mode_ &&
    1801           0 :              CanExpandOldGeneration(new_space()->Size() +
    1802           0 :                                     new_lo_space()->Size()))) {
    1803             :           tracer()->NotifyYoungGenerationHandling(
    1804           0 :               YoungGenerationHandling::kFastPromotionDuringScavenge);
    1805           0 :           EvacuateYoungGeneration();
    1806             :         } else {
    1807             :           tracer()->NotifyYoungGenerationHandling(
    1808       26098 :               YoungGenerationHandling::kRegularScavenge);
    1809             : 
    1810       26098 :           Scavenge();
    1811             :         }
    1812             :         break;
    1813             :     }
    1814             : 
    1815       94944 :     ProcessPretenuringFeedback();
    1816             :   }
    1817             : 
    1818       94944 :   UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
    1819       94944 :   ConfigureInitialOldGenerationSize();
    1820             : 
    1821       94944 :   if (collector != MARK_COMPACTOR) {
    1822             :     // Objects that died in the new space might have been accounted
    1823             :     // as bytes marked ahead of schedule by the incremental marker.
    1824       26098 :     incremental_marking()->UpdateMarkedBytesAfterScavenge(
    1825       26098 :         start_young_generation_size - SurvivedYoungObjectSize());
    1826             :   }
    1827             : 
    1828       94944 :   if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
    1829       94944 :     ComputeFastPromotionMode();
    1830             :   }
    1831             : 
    1832       94944 :   isolate_->counters()->objs_since_last_young()->Set(0);
    1833             : 
    1834             :   {
    1835      379776 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
    1836             :     // First round weak callbacks are not supposed to allocate and trigger
    1837             :     // nested GCs.
    1838             :     freed_global_handles =
    1839       94944 :         isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
    1840             :   }
    1841             : 
    1842       94944 :   if (collector == MARK_COMPACTOR) {
    1843      275384 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
    1844             :     // TraceEpilogue may trigger operations that invalidate global handles. It
    1845             :     // has to be called *after* all other operations that potentially touch and
    1846             :     // reset global handles. It is also still part of the main garbage
    1847             :     // collection pause and thus needs to be called *before* any operation that
     1848             :     // can potentially trigger recursive garbage collection.
    1849       68846 :     local_embedder_heap_tracer()->TraceEpilogue();
    1850             :   }
    1851             : 
    1852             :   {
    1853      379776 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
    1854       94944 :     gc_post_processing_depth_++;
    1855             :     {
    1856             :       AllowHeapAllocation allow_allocation;
    1857      189888 :       AllowJavascriptExecution allow_js(isolate());
    1858             :       freed_global_handles +=
    1859       94944 :           isolate_->global_handles()->PostGarbageCollectionProcessing(
    1860       94944 :               collector, gc_callback_flags);
    1861             :     }
    1862       94944 :     gc_post_processing_depth_--;
    1863             :   }
    1864             : 
    1865       94944 :   isolate_->eternal_handles()->PostGarbageCollectionProcessing();
    1866             : 
    1867             :   // Update relocatables.
    1868       94944 :   Relocatable::PostGarbageCollectionProcessing(isolate_);
    1869             : 
    1870       94944 :   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
    1871             :   double mutator_speed =
    1872       94944 :       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
    1873       94944 :   size_t old_gen_size = OldGenerationSizeOfObjects();
    1874       94944 :   if (collector == MARK_COMPACTOR) {
    1875             :     // Register the amount of external allocated memory.
    1876             :     isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
    1877       68846 :         isolate()->isolate_data()->external_memory_;
    1878             :     isolate()->isolate_data()->external_memory_limit_ =
    1879       68846 :         isolate()->isolate_data()->external_memory_ +
    1880       68846 :         kExternalAllocationSoftLimit;
    1881             : 
    1882             :     double max_factor =
    1883      137692 :         heap_controller()->MaxGrowingFactor(max_old_generation_size_);
    1884      206538 :     size_t new_limit = heap_controller()->CalculateAllocationLimit(
    1885             :         old_gen_size, max_old_generation_size_, max_factor, gc_speed,
    1886       68846 :         mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
    1887       68846 :     old_generation_allocation_limit_ = new_limit;
    1888             : 
    1889       68846 :     CheckIneffectiveMarkCompact(
    1890       68846 :         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
    1891       28220 :   } else if (HasLowYoungGenerationAllocationRate() &&
    1892        2122 :              old_generation_size_configured_) {
    1893             :     double max_factor =
    1894        2162 :         heap_controller()->MaxGrowingFactor(max_old_generation_size_);
    1895        3243 :     size_t new_limit = heap_controller()->CalculateAllocationLimit(
    1896             :         old_gen_size, max_old_generation_size_, max_factor, gc_speed,
    1897        1081 :         mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
    1898        1081 :     if (new_limit < old_generation_allocation_limit_) {
    1899          13 :       old_generation_allocation_limit_ = new_limit;
    1900             :     }
    1901             :   }
    1902             : 
    1903             :   {
    1904             :     GCCallbacksScope scope(this);
    1905       94944 :     if (scope.CheckReenter()) {
    1906             :       AllowHeapAllocation allow_allocation;
    1907      189824 :       AllowJavascriptExecution allow_js(isolate());
    1908      379648 :       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
    1909      189824 :       VMState<EXTERNAL> state(isolate_);
    1910       94912 :       HandleScope handle_scope(isolate_);
    1911       94912 :       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
    1912             :     }
    1913             :   }
    1914             : 
    1915             : #ifdef VERIFY_HEAP
    1916             :   if (FLAG_verify_heap) {
    1917             :     VerifyStringTable(this->isolate());
    1918             :   }
    1919             : #endif
    1920             : 
    1921      189888 :   return freed_global_handles > 0;
    1922             : }
    1923             : 
    1924             : 
    1925      120068 : void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
    1926             :   RuntimeCallTimerScope runtime_timer(
    1927      120068 :       isolate(), RuntimeCallCounterId::kGCPrologueCallback);
    1928      120133 :   for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
    1929          65 :     if (gc_type & info.gc_type) {
    1930             :       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
    1931          65 :       info.callback(isolate, gc_type, flags, info.data);
    1932             :     }
    1933             :   }
    1934      120068 : }
    1935             : 
    1936      120068 : void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
    1937             :   RuntimeCallTimerScope runtime_timer(
    1938      120068 :       isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
    1939      240201 :   for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
    1940      120133 :     if (gc_type & info.gc_type) {
    1941             :       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
    1942       68899 :       info.callback(isolate, gc_type, flags, info.data);
    1943             :     }
    1944             :   }
    1945      120068 : }
    1946             : 
    1947             : 
    1948       68846 : void Heap::MarkCompact() {
    1949      137692 :   PauseAllocationObserversScope pause_observers(this);
    1950             : 
    1951             :   SetGCState(MARK_COMPACT);
    1952             : 
    1953       68846 :   LOG(isolate_, ResourceEvent("markcompact", "begin"));
    1954             : 
    1955             :   uint64_t size_of_objects_before_gc = SizeOfObjects();
    1956             : 
    1957      137692 :   CodeSpaceMemoryModificationScope code_modification(this);
    1958             : 
    1959       68846 :   mark_compact_collector()->Prepare();
    1960             : 
    1961       68846 :   ms_count_++;
    1962             : 
    1963       68846 :   MarkCompactPrologue();
    1964             : 
    1965       68846 :   mark_compact_collector()->CollectGarbage();
    1966             : 
    1967       68846 :   LOG(isolate_, ResourceEvent("markcompact", "end"));
    1968             : 
    1969       68846 :   MarkCompactEpilogue();
    1970             : 
    1971       68846 :   if (FLAG_allocation_site_pretenuring) {
    1972       68846 :     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
    1973             :   }
    1974       68846 : }
    1975             : 
    1976           0 : void Heap::MinorMarkCompact() {
    1977             : #ifdef ENABLE_MINOR_MC
    1978             :   DCHECK(FLAG_minor_mc);
    1979             : 
    1980           0 :   PauseAllocationObserversScope pause_observers(this);
    1981             :   SetGCState(MINOR_MARK_COMPACT);
    1982           0 :   LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
    1983             : 
    1984           0 :   TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
    1985             :   AlwaysAllocateScope always_allocate(isolate());
    1986             :   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
    1987             :       incremental_marking());
    1988           0 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    1989             : 
    1990           0 :   minor_mark_compact_collector()->CollectGarbage();
    1991             : 
    1992           0 :   LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
    1993             :   SetGCState(NOT_IN_GC);
    1994             : #else
    1995             :   UNREACHABLE();
    1996             : #endif  // ENABLE_MINOR_MC
    1997           0 : }
    1998             : 
    1999       68846 : void Heap::MarkCompactEpilogue() {
    2000      275384 :   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
    2001             :   SetGCState(NOT_IN_GC);
    2002             : 
    2003       68846 :   isolate_->counters()->objs_since_last_full()->Set(0);
    2004             : 
    2005       68846 :   incremental_marking()->Epilogue();
    2006             : 
    2007             :   DCHECK(incremental_marking()->IsStopped());
    2008       68846 : }
    2009             : 
    2010             : 
    2011       68846 : void Heap::MarkCompactPrologue() {
    2012      275384 :   TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
    2013       68846 :   isolate_->descriptor_lookup_cache()->Clear();
    2014       68846 :   RegExpResultsCache::Clear(string_split_cache());
    2015       68846 :   RegExpResultsCache::Clear(regexp_multiple_cache());
    2016             : 
    2017       68846 :   isolate_->compilation_cache()->MarkCompactPrologue();
    2018             : 
    2019       68846 :   FlushNumberStringCache();
    2020       68846 : }
    2021             : 
    2022             : 
    2023       94944 : void Heap::CheckNewSpaceExpansionCriteria() {
    2024       94944 :   if (FLAG_experimental_new_space_growth_heuristic) {
    2025           0 :     if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
    2026           0 :         survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
    2027             :       // Grow the size of new space if there is room to grow and at least 10%
    2028             :       // of its capacity survived the last scavenge.
    2029           0 :       new_space_->Grow();
    2030           0 :       survived_since_last_expansion_ = 0;
    2031             :     }
    2032      186173 :   } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
    2033       91229 :              survived_since_last_expansion_ > new_space_->TotalCapacity()) {
    2034             :     // Grow the size of new space if there is room to grow, and enough data
    2035             :     // has survived scavenge since the last expansion.
    2036        2561 :     new_space_->Grow();
    2037        2561 :     survived_since_last_expansion_ = 0;
    2038             :   }
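                     :   // [Editor's note] Illustration of the default criterion above, using a
                     :   // hypothetical capacity (not part of the measured source): if
                     :   // new_space_->TotalCapacity() were 4 MB, the semispaces would be grown
                     :   // only after more than 4 MB of objects in total had survived scavenges
                     :   // since the previous expansion, i.e. once
                     :   // survived_since_last_expansion_ > 4 MB.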
    2039       94944 :   new_lo_space()->SetCapacity(new_space()->Capacity());
    2040       94944 : }
    2041             : 
    2042           0 : void Heap::EvacuateYoungGeneration() {
    2043           0 :   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
    2044             :   base::MutexGuard guard(relocation_mutex());
    2045           0 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    2046             :   if (!FLAG_concurrent_marking) {
    2047             :     DCHECK(fast_promotion_mode_);
    2048             :     DCHECK(
    2049             :         CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()));
    2050             :   }
    2051             : 
    2052           0 :   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    2053             : 
    2054             :   SetGCState(SCAVENGE);
    2055           0 :   LOG(isolate_, ResourceEvent("scavenge", "begin"));
    2056             : 
    2057             :   // Move pages from new->old generation.
    2058             :   PageRange range(new_space()->first_allocatable_address(), new_space()->top());
    2059           0 :   for (auto it = range.begin(); it != range.end();) {
    2060             :     Page* p = (*++it)->prev_page();
    2061           0 :     new_space()->from_space().RemovePage(p);
    2062           0 :     Page::ConvertNewToOld(p);
    2063           0 :     if (incremental_marking()->IsMarking())
    2064           0 :       mark_compact_collector()->RecordLiveSlotsOnPage(p);
    2065             :   }
    2066             : 
    2067             :   // Reset new space.
    2068           0 :   if (!new_space()->Rebalance()) {
    2069           0 :     FatalProcessOutOfMemory("NewSpace::Rebalance");
    2070             :   }
    2071           0 :   new_space()->ResetLinearAllocationArea();
    2072             :   new_space()->set_age_mark(new_space()->top());
    2073             : 
    2074           0 :   for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
    2075             :     LargePage* page = *it;
    2076             :     // Increment has to happen after we save the page, because it is going to
    2077             :     // be removed below.
    2078             :     it++;
    2079           0 :     lo_space()->PromoteNewLargeObject(page);
    2080             :   }
    2081             : 
    2082             :   // Fix up special trackers.
    2083           0 :   external_string_table_.PromoteYoung();
    2084             :   // GlobalHandles are updated in PostGarbageCollectionProcessing.
    2085             : 
    2086           0 :   size_t promoted = new_space()->Size() + new_lo_space()->Size();
    2087             :   IncrementYoungSurvivorsCounter(promoted);
    2088             :   IncrementPromotedObjectsSize(promoted);
    2089             :   IncrementSemiSpaceCopiedObjectSize(0);
    2090             : 
    2091           0 :   LOG(isolate_, ResourceEvent("scavenge", "end"));
    2092             :   SetGCState(NOT_IN_GC);
    2093           0 : }
    2094             : 
    2095       26098 : void Heap::Scavenge() {
    2096      104392 :   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
    2097             :   base::MutexGuard guard(relocation_mutex());
    2098       52196 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    2099             :   // There are soft limits in the allocation code, designed to trigger a mark
    2100             :   // sweep collection by failing allocations. There is no sense in trying to
    2101             :   // trigger one during scavenge: scavenge allocations should always succeed.
    2102             :   AlwaysAllocateScope scope(isolate());
    2103             : 
    2104             :   // Bump-pointer allocations done during scavenge are not real allocations.
    2105             :   // Pause the inline allocation steps.
    2106       52196 :   PauseAllocationObserversScope pause_observers(this);
    2107             :   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
    2108             :       incremental_marking());
    2109             : 
    2110             : 
    2111       26098 :   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    2112             : 
    2113             :   SetGCState(SCAVENGE);
    2114             : 
    2115             :   // Flip the semispaces. After flipping, to-space is empty and from-space
    2116             :   // holds the live objects.
    2117       26098 :   new_space()->Flip();
    2118       26098 :   new_space()->ResetLinearAllocationArea();
    2119             : 
    2120             :   // We also flip the young generation large object space. All large objects
    2121             :   // will be in the from space.
    2122       26098 :   new_lo_space()->Flip();
    2123             :   new_lo_space()->ResetPendingObject();
    2124             : 
    2125             :   // Implements Cheney's copying algorithm
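                     :   // [Editor's note] For orientation only, a textbook sketch of Cheney's
                     :   // algorithm; the actual collector (scavenger_collector_, see
                     :   // src/heap/scavenger-inl.h) is parallel and also handles weak references:
                     :   //   Address scan = to_space.start, free = to_space.start;
                     :   //   for (each root slot) *slot = Evacuate(*slot);  // copy roots, bump free
                     :   //   while (scan < free) {                          // breadth-first scan
                     :   //     for (each pointer slot in object at scan) *slot = Evacuate(*slot);
                     :   //     scan += size of object at scan;
                     :   //   }
                     :   //   // Evacuate() copies an object into to-space (advancing free) and
                     :   //   // installs a forwarding address in the from-space original.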
    2126       26098 :   LOG(isolate_, ResourceEvent("scavenge", "begin"));
    2127             : 
    2128       26098 :   scavenger_collector_->CollectGarbage();
    2129             : 
    2130       26098 :   LOG(isolate_, ResourceEvent("scavenge", "end"));
    2131             : 
    2132             :   SetGCState(NOT_IN_GC);
    2133       26098 : }
    2134             : 
    2135       94944 : void Heap::ComputeFastPromotionMode() {
    2136             :   const size_t survived_in_new_space =
    2137      189888 :       survived_last_scavenge_ * 100 / new_space_->Capacity();
    2138             :   fast_promotion_mode_ =
    2139      189888 :       !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
    2140       94944 :       !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
    2141       94944 :       survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
    2142       94944 :   if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    2143           0 :     PrintIsolate(
    2144             :         isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
    2145           0 :         fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
    2146             :   }
    2147       94944 : }
    2148             : 
    2149     2035635 : void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
    2150     2035635 :   if (unprotected_memory_chunks_registry_enabled_) {
    2151     1827721 :     base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
    2152     1827717 :     if (unprotected_memory_chunks_.insert(chunk).second) {
    2153     1822624 :       chunk->SetReadAndWritable();
    2154             :     }
    2155             :   }
    2156     2035635 : }
    2157             : 
    2158     1212854 : void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object) {
    2159     1947118 :   UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object));
    2160     1212854 : }
    2161             : 
    2162      131712 : void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
    2163             :   unprotected_memory_chunks_.erase(chunk);
    2164      131712 : }
    2165             : 
    2166     1777090 : void Heap::ProtectUnprotectedMemoryChunks() {
    2167             :   DCHECK(unprotected_memory_chunks_registry_enabled_);
    2168     3599717 :   for (auto chunk = unprotected_memory_chunks_.begin();
    2169             :        chunk != unprotected_memory_chunks_.end(); chunk++) {
    2170     3645253 :     CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
    2171     1822627 :     (*chunk)->SetDefaultCodePermissions();
    2172             :   }
    2173             :   unprotected_memory_chunks_.clear();
    2174     1777091 : }
    2175             : 
    2176           0 : bool Heap::ExternalStringTable::Contains(String string) {
    2177           0 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    2178           0 :     if (young_strings_[i] == string) return true;
    2179             :   }
    2180           0 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    2181           0 :     if (old_strings_[i] == string) return true;
    2182             :   }
    2183             :   return false;
    2184             : }
    2185             : 
    2186       88054 : void Heap::UpdateExternalString(String string, size_t old_payload,
    2187             :                                 size_t new_payload) {
    2188             :   DCHECK(string->IsExternalString());
    2189             :   Page* page = Page::FromHeapObject(string);
    2190             : 
    2191       88054 :   if (old_payload > new_payload) {
    2192          17 :     page->DecrementExternalBackingStoreBytes(
    2193          17 :         ExternalBackingStoreType::kExternalString, old_payload - new_payload);
    2194             :   } else {
    2195       88037 :     page->IncrementExternalBackingStoreBytes(
    2196       88037 :         ExternalBackingStoreType::kExternalString, new_payload - old_payload);
    2197             :   }
    2198       88056 : }
    2199             : 
    2200         124 : String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
    2201             :                                                             FullObjectSlot p) {
    2202             :   HeapObject obj = HeapObject::cast(*p);
    2203             :   MapWord first_word = obj->map_word();
    2204             : 
    2205             :   String new_string;
    2206             : 
    2207         124 :   if (InFromPage(obj)) {
    2208         124 :     if (!first_word.IsForwardingAddress()) {
    2209             :       // Unreachable external string can be finalized.
    2210             :       String string = String::cast(obj);
    2211         120 :       if (!string->IsExternalString()) {
    2212             :         // Original external string has been internalized.
    2213             :         DCHECK(string->IsThinString());
    2214           5 :         return String();
    2215             :       }
    2216         115 :       heap->FinalizeExternalString(string);
    2217         115 :       return String();
    2218             :     }
    2219             :     new_string = String::cast(first_word.ToForwardingAddress());
    2220             :   } else {
    2221             :     new_string = String::cast(obj);
    2222             :   }
    2223             : 
    2224             :   // String is still reachable.
    2225           4 :   if (new_string->IsThinString()) {
    2226             :     // Filter ThinStrings out of the external string table.
    2227           0 :     return String();
    2228           4 :   } else if (new_string->IsExternalString()) {
    2229           4 :     MemoryChunk::MoveExternalBackingStoreBytes(
    2230             :         ExternalBackingStoreType::kExternalString,
    2231             :         Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
    2232           8 :         ExternalString::cast(new_string)->ExternalPayloadSize());
    2233           4 :     return new_string;
    2234             :   }
    2235             : 
    2236             :   // Internalization can replace external strings with non-external strings.
    2237           0 :   return new_string->IsExternalString() ? new_string : String();
    2238             : }
    2239             : 
    2240           0 : void Heap::ExternalStringTable::VerifyYoung() {
    2241             : #ifdef DEBUG
    2242             :   std::set<String> visited_map;
    2243             :   std::map<MemoryChunk*, size_t> size_map;
    2244             :   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
    2245             :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    2246             :     String obj = String::cast(young_strings_[i]);
    2247             :     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
    2248             :     DCHECK(mc->InYoungGeneration());
    2249             :     DCHECK(heap_->InYoungGeneration(obj));
    2250             :     DCHECK(!obj->IsTheHole(heap_->isolate()));
    2251             :     DCHECK(obj->IsExternalString());
    2252             :     // Note: we can have repeated elements in the table.
    2253             :     DCHECK_EQ(0, visited_map.count(obj));
    2254             :     visited_map.insert(obj);
    2255             :     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
    2256             :   }
    2257             :   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
    2258             :        it != size_map.end(); it++)
    2259             :     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
    2260             : #endif
    2261           0 : }
    2262             : 
    2263           0 : void Heap::ExternalStringTable::Verify() {
    2264             : #ifdef DEBUG
    2265             :   std::set<String> visited_map;
    2266             :   std::map<MemoryChunk*, size_t> size_map;
    2267             :   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
    2268             :   VerifyYoung();
    2269             :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    2270             :     String obj = String::cast(old_strings_[i]);
    2271             :     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
    2272             :     DCHECK(!mc->InYoungGeneration());
    2273             :     DCHECK(!heap_->InYoungGeneration(obj));
    2274             :     DCHECK(!obj->IsTheHole(heap_->isolate()));
    2275             :     DCHECK(obj->IsExternalString());
    2276             :     // Note: we can have repeated elements in the table.
    2277             :     DCHECK_EQ(0, visited_map.count(obj));
    2278             :     visited_map.insert(obj);
    2279             :     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
    2280             :   }
    2281             :   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
    2282             :        it != size_map.end(); it++)
    2283             :     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
    2284             : #endif
    2285           0 : }
    2286             : 
    2287       94944 : void Heap::ExternalStringTable::UpdateYoungReferences(
    2288             :     Heap::ExternalStringTableUpdaterCallback updater_func) {
    2289       94944 :   if (young_strings_.empty()) return;
    2290             : 
    2291             :   FullObjectSlot start(&young_strings_[0]);
    2292             :   FullObjectSlot end(&young_strings_[young_strings_.size()]);
    2293             :   FullObjectSlot last = start;
    2294             : 
    2295         152 :   for (FullObjectSlot p = start; p < end; ++p) {
    2296         136 :     String target = updater_func(heap_, p);
    2297             : 
    2298         256 :     if (target.is_null()) continue;
    2299             : 
    2300             :     DCHECK(target->IsExternalString());
    2301             : 
    2302          16 :     if (InYoungGeneration(target)) {
    2303             :       // String is still in new space. Update the table entry.
    2304             :       last.store(target);
    2305             :       ++last;
    2306             :     } else {
    2307             :       // String got promoted. Move it to the old string list.
    2308           0 :       old_strings_.push_back(target);
    2309             :     }
    2310             :   }
    2311             : 
    2312             :   DCHECK(last <= end);
    2313          16 :   young_strings_.resize(last - start);
    2314             : #ifdef VERIFY_HEAP
    2315             :   if (FLAG_verify_heap) {
    2316             :     VerifyYoung();
    2317             :   }
    2318             : #endif
    2319             : }
    2320             : 
    2321           0 : void Heap::ExternalStringTable::PromoteYoung() {
    2322           0 :   old_strings_.reserve(old_strings_.size() + young_strings_.size());
    2323             :   std::move(std::begin(young_strings_), std::end(young_strings_),
    2324             :             std::back_inserter(old_strings_));
    2325             :   young_strings_.clear();
    2326           0 : }
    2327             : 
    2328           0 : void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
    2329       70539 :   if (!young_strings_.empty()) {
    2330          28 :     v->VisitRootPointers(
    2331             :         Root::kExternalStringsTable, nullptr,
    2332             :         FullObjectSlot(&young_strings_[0]),
    2333          56 :         FullObjectSlot(&young_strings_[young_strings_.size()]));
    2334             :   }
    2335           0 : }
    2336             : 
    2337       70539 : void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
    2338             :   IterateYoung(v);
    2339       70539 :   if (!old_strings_.empty()) {
    2340       70484 :     v->VisitRootPointers(
    2341             :         Root::kExternalStringsTable, nullptr,
    2342             :         FullObjectSlot(old_strings_.data()),
    2343      140968 :         FullObjectSlot(old_strings_.data() + old_strings_.size()));
    2344             :   }
    2345       70539 : }
    2346             : 
    2347       26098 : void Heap::UpdateYoungReferencesInExternalStringTable(
    2348             :     ExternalStringTableUpdaterCallback updater_func) {
    2349       26098 :   external_string_table_.UpdateYoungReferences(updater_func);
    2350       26098 : }
    2351             : 
    2352       68846 : void Heap::ExternalStringTable::UpdateReferences(
    2353             :     Heap::ExternalStringTableUpdaterCallback updater_func) {
    2354       68846 :   if (old_strings_.size() > 0) {
    2355             :     FullObjectSlot start(old_strings_.data());
    2356             :     FullObjectSlot end(old_strings_.data() + old_strings_.size());
    2357      171145 :     for (FullObjectSlot p = start; p < end; ++p)
    2358      204818 :       p.store(updater_func(heap_, p));
    2359             :   }
    2360             : 
    2361       68846 :   UpdateYoungReferences(updater_func);
    2362       68846 : }
    2363             : 
    2364       68846 : void Heap::UpdateReferencesInExternalStringTable(
    2365             :     ExternalStringTableUpdaterCallback updater_func) {
    2366       68846 :   external_string_table_.UpdateReferences(updater_func);
    2367       68846 : }
    2368             : 
    2369             : 
    2370       68846 : void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
    2371             :   ProcessNativeContexts(retainer);
    2372             :   ProcessAllocationSites(retainer);
    2373       68846 : }
    2374             : 
    2375             : 
    2376       26098 : void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
    2377             :   ProcessNativeContexts(retainer);
    2378       26098 : }
    2379             : 
    2380             : 
    2381           0 : void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
    2382       94944 :   Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
    2383             :   // Update the head of the list of contexts.
    2384             :   set_native_contexts_list(head);
    2385           0 : }
    2386             : 
    2387             : 
    2388           0 : void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
    2389             :   Object allocation_site_obj =
    2390       68846 :       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
    2391             :   set_allocation_sites_list(allocation_site_obj);
    2392           0 : }
    2393             : 
    2394       68846 : void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
    2395       68846 :   set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
    2396       68846 :   set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
    2397       68846 : }
    2398             : 
    2399         342 : void Heap::ForeachAllocationSite(
    2400             :     Object list, const std::function<void(AllocationSite)>& visitor) {
    2401             :   DisallowHeapAllocation disallow_heap_allocation;
    2402         342 :   Object current = list;
    2403        2884 :   while (current->IsAllocationSite()) {
    2404             :     AllocationSite site = AllocationSite::cast(current);
    2405             :     visitor(site);
    2406             :     Object current_nested = site->nested_site();
    2407        1327 :     while (current_nested->IsAllocationSite()) {
    2408             :       AllocationSite nested_site = AllocationSite::cast(current_nested);
    2409             :       visitor(nested_site);
    2410             :       current_nested = nested_site->nested_site();
    2411             :     }
    2412             :     current = site->weak_next();
    2413             :   }
    2414         342 : }
    2415             : 
    2416         134 : void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
    2417             :   DisallowHeapAllocation no_allocation_scope;
    2418         134 :   bool marked = false;
    2419             : 
    2420         134 :   ForeachAllocationSite(allocation_sites_list(),
    2421        1178 :                         [&marked, allocation, this](AllocationSite site) {
    2422        1178 :                           if (site->GetAllocationType() == allocation) {
    2423           0 :                             site->ResetPretenureDecision();
    2424             :                             site->set_deopt_dependent_code(true);
    2425           0 :                             marked = true;
    2426             :                             RemoveAllocationSitePretenuringFeedback(site);
    2427             :                             return;
    2428             :                           }
    2429         134 :                         });
    2430         134 :   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    2431         134 : }
    2432             : 
    2433       68846 : void Heap::EvaluateOldSpaceLocalPretenuring(
    2434             :     uint64_t size_of_objects_before_gc) {
    2435             :   uint64_t size_of_objects_after_gc = SizeOfObjects();
    2436             :   double old_generation_survival_rate =
    2437       68846 :       (static_cast<double>(size_of_objects_after_gc) * 100) /
    2438       68846 :       static_cast<double>(size_of_objects_before_gc);
    2439             : 
    2440       68846 :   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    2441             :     // Too many objects died in the old generation; pretenuring of the wrong
    2442             :     // allocation sites may be the cause. We have to deopt all dependent code
    2443             :     // registered in the allocation sites to re-evaluate our pretenuring
    2444             :     // decisions.
    2445         134 :     ResetAllAllocationSitesDependentCode(AllocationType::kOld);
    2446         134 :     if (FLAG_trace_pretenuring) {
    2447             :       PrintF(
    2448             :           "Deopt all allocation sites dependent code due to low survival "
    2449             :           "rate in the old generation %f\n",
    2450           0 :           old_generation_survival_rate);
    2451             :     }
    2452             :   }
    2453       68846 : }
    2454             : 
    2455             : 
    2456           5 : void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
    2457             :   DisallowHeapAllocation no_allocation;
    2458             :   // All external strings are listed in the external string table.
    2459             : 
    2460           5 :   class ExternalStringTableVisitorAdapter : public RootVisitor {
    2461             :    public:
    2462             :     explicit ExternalStringTableVisitorAdapter(
    2463             :         Isolate* isolate, v8::ExternalResourceVisitor* visitor)
    2464           5 :         : isolate_(isolate), visitor_(visitor) {}
    2465           5 :     void VisitRootPointers(Root root, const char* description,
    2466             :                            FullObjectSlot start, FullObjectSlot end) override {
    2467          35 :       for (FullObjectSlot p = start; p < end; ++p) {
    2468             :         DCHECK((*p)->IsExternalString());
    2469          75 :         visitor_->VisitExternalString(
    2470          50 :             Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
    2471             :       }
    2472           5 :     }
    2473             : 
    2474             :    private:
    2475             :     Isolate* isolate_;
    2476             :     v8::ExternalResourceVisitor* visitor_;
    2477             :   } external_string_table_visitor(isolate(), visitor);
    2478             : 
    2479           5 :   external_string_table_.IterateAll(&external_string_table_visitor);
    2480           5 : }
    2481             : 
    2482             : STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment));
    2483             : 
    2484             : #ifdef V8_COMPRESS_POINTERS
    2485             : // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
    2486             : // is only kTaggedSize aligned but we can keep using unaligned access since
    2487             : // both x64 and arm64 architectures (where pointer compression supported)
    2488             : // allow unaligned access to doubles.
    2489             : STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kTaggedSize));
    2490             : #else
    2491             : STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kDoubleAlignment));
    2492             : #endif
    2493             : 
    2494             : #ifdef V8_HOST_ARCH_32_BIT
    2495             : STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
    2496             : #endif
    2497             : 
    2498             : 
    2499          25 : int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
    2500          25 :   switch (alignment) {
    2501             :     case kWordAligned:
    2502             :       return 0;
    2503             :     case kDoubleAligned:
    2504             :     case kDoubleUnaligned:
    2505             :       return kDoubleSize - kTaggedSize;
    2506             :     default:
    2507           0 :       UNREACHABLE();
    2508             :   }
    2509             :   return 0;
    2510             : }
    2511             : 
    2512             : 
    2513    89485836 : int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
    2514    89485836 :   if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
    2515             :     return kTaggedSize;
    2516             :   if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
    2517             :     return kDoubleSize - kTaggedSize;  // No fill if double is always aligned.
    2518             :   return 0;
    2519             : }
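                     : // [Editor's note] Worked example, assuming kTaggedSize == 4 and
                     : // kDoubleSize == 8 (e.g. a 32-bit build), so kDoubleAlignmentMask is
                     : // presumably 7:
                     : //   GetFillToAlign(0x1004, kDoubleAligned)   == 4  // pad so the object starts at 0x1008
                     : //   GetFillToAlign(0x1008, kDoubleAligned)   == 0  // already aligned
                     : //   GetFillToAlign(0x1008, kDoubleUnaligned) == 4  // shift payload off the 8-byte boundary
                     : //   GetFillToAlign(0x1004, kDoubleUnaligned) == 0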
    2520             : 
    2521           0 : size_t Heap::GetCodeRangeReservedAreaSize() {
    2522           0 :   return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
    2523             : }
    2524             : 
    2525           0 : HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
    2526           0 :   CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
    2527           0 :   return HeapObject::FromAddress(object->address() + filler_size);
    2528             : }
    2529             : 
    2530           0 : HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
    2531             :                                  int allocation_size,
    2532             :                                  AllocationAlignment alignment) {
    2533           0 :   int filler_size = allocation_size - object_size;
    2534             :   DCHECK_LT(0, filler_size);
    2535             :   int pre_filler = GetFillToAlign(object->address(), alignment);
    2536           0 :   if (pre_filler) {
    2537             :     object = PrecedeWithFiller(object, pre_filler);
    2538           0 :     filler_size -= pre_filler;
    2539             :   }
    2540           0 :   if (filler_size) {
    2541             :     CreateFillerObjectAt(object->address() + object_size, filler_size,
    2542           0 :                          ClearRecordedSlots::kNo);
    2543             :   }
    2544           0 :   return object;
    2545             : }
    2546             : 
    2547      519436 : void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) {
    2548      519436 :   ArrayBufferTracker::RegisterNew(this, buffer);
    2549      519473 : }
    2550             : 
    2551        5327 : void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
    2552        5327 :   ArrayBufferTracker::Unregister(this, buffer);
    2553        5327 : }
    2554             : 
    2555       94944 : void Heap::ConfigureInitialOldGenerationSize() {
    2556      103265 :   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
    2557             :     const size_t new_limit =
    2558       16582 :         Max(OldGenerationSizeOfObjects() +
    2559       16582 :                 heap_controller()->MinimumAllocationLimitGrowingStep(
    2560             :                     CurrentHeapGrowingMode()),
    2561             :             static_cast<size_t>(
    2562       16582 :                 static_cast<double>(old_generation_allocation_limit_) *
    2563        8291 :                 (tracer()->AverageSurvivalRatio() / 100)));
    2564        8291 :     if (new_limit < old_generation_allocation_limit_) {
    2565        7586 :       old_generation_allocation_limit_ = new_limit;
    2566             :     } else {
    2567         705 :       old_generation_size_configured_ = true;
    2568             :     }
    2569             :   }
    2570       94944 : }
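                     : // [Editor's note] Worked example with made-up numbers: with a current limit
                     : // of 128 MB, OldGenerationSizeOfObjects() == 40 MB, a minimum growing step
                     : // of 8 MB and an average survival ratio of 50%, the candidate limit is
                     : // Max(40 + 8, 128 * 0.5) = 64 MB. Since 64 MB < 128 MB the limit is lowered;
                     : // once a candidate no longer undercuts the current limit, the old generation
                     : // size is treated as configured.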
    2571             : 
    2572       68846 : void Heap::FlushNumberStringCache() {
    2573             :   // Flush the number to string cache.
    2574             :   int len = number_string_cache()->length();
    2575   664701166 :   for (int i = 0; i < len; i++) {
    2576   332316160 :     number_string_cache()->set_undefined(i);
    2577             :   }
    2578       68846 : }
    2579             : 
    2580    88560571 : HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
    2581             :                                       ClearRecordedSlots clear_slots_mode,
    2582             :                                       ClearFreedMemoryMode clear_memory_mode) {
    2583    88560571 :   if (size == 0) return HeapObject();
    2584             :   HeapObject filler = HeapObject::FromAddress(addr);
    2585    87332072 :   if (size == kTaggedSize) {
    2586             :     filler->set_map_after_allocation(
    2587             :         Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
    2588             :         SKIP_WRITE_BARRIER);
    2589    84139068 :   } else if (size == 2 * kTaggedSize) {
    2590             :     filler->set_map_after_allocation(
    2591             :         Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
    2592             :         SKIP_WRITE_BARRIER);
    2593     3221227 :     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
    2594       37456 :       Memory<Tagged_t>(addr + kTaggedSize) =
    2595       37456 :           static_cast<Tagged_t>(kClearedFreeMemoryValue);
    2596             :     }
    2597             :   } else {
    2598             :     DCHECK_GT(size, 2 * kTaggedSize);
    2599             :     filler->set_map_after_allocation(
    2600             :         Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
    2601             :         SKIP_WRITE_BARRIER);
    2602             :     FreeSpace::cast(filler)->relaxed_write_size(size);
    2603    80917841 :     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
    2604      193202 :       MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
    2605      193202 :                    (size / kTaggedSize) - 2);
    2606             :     }
    2607             :   }
    2608    87304619 :   if (clear_slots_mode == ClearRecordedSlots::kYes) {
    2609     1763410 :     ClearRecordedSlotRange(addr, addr + size);
    2610             :   }
    2611             : 
    2612             :   // At this point, we may be deserializing the heap from a snapshot, and
    2613             :   // none of the maps have been created yet, so they are still null.
    2614             :   DCHECK((filler->map_slot().contains_value(kNullAddress) &&
    2615             :           !deserialization_complete_) ||
    2616             :          filler->map()->IsMap());
    2617    87304619 :   return filler;
    2618             : }
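                     : // [Editor's note] Illustrative calls (assuming a 64-bit build with
                     : // kTaggedSize == 8); the filler shape above is chosen purely by size:
                     : //   CreateFillerObjectAt(addr, 8,  ClearRecordedSlots::kNo);   // one-pointer filler
                     : //   CreateFillerObjectAt(addr, 16, ClearRecordedSlots::kNo);   // two-pointer filler
                     : //   CreateFillerObjectAt(addr, 48, ClearRecordedSlots::kYes);  // FreeSpace filler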
    2619             : 
    2620      181562 : bool Heap::CanMoveObjectStart(HeapObject object) {
    2621      181562 :   if (!FLAG_move_object_start) return false;
    2622             : 
    2623             :   // Sampling heap profiler may have a reference to the object.
    2624      181562 :   if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
    2625             : 
    2626      181562 :   if (IsLargeObject(object)) return false;
    2627             : 
    2628             :   // We can move the object start if the page was already swept.
    2629        2097 :   return Page::FromHeapObject(object)->SweepingDone();
    2630             : }
    2631             : 
    2632       44784 : bool Heap::IsImmovable(HeapObject object) {
    2633             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    2634      169767 :   return chunk->NeverEvacuate() || IsLargeObject(object);
    2635             : }
    2636             : 
    2637      782711 : bool Heap::IsLargeObject(HeapObject object) {
    2638      782711 :   return MemoryChunk::FromHeapObject(object)->IsLargePage();
    2639             : }
    2640             : 
    2641             : #ifdef ENABLE_SLOW_DCHECKS
    2642             : namespace {
    2643             : 
    2644             : class LeftTrimmerVerifierRootVisitor : public RootVisitor {
    2645             :  public:
    2646             :   explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
    2647             :       : to_check_(to_check) {}
    2648             : 
    2649             :   void VisitRootPointers(Root root, const char* description,
    2650             :                          FullObjectSlot start, FullObjectSlot end) override {
    2651             :     for (FullObjectSlot p = start; p < end; ++p) {
    2652             :       DCHECK_NE(*p, to_check_);
    2653             :     }
    2654             :   }
    2655             : 
    2656             :  private:
    2657             :   FixedArrayBase to_check_;
    2658             : 
    2659             :   DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
    2660             : };
    2661             : }  // namespace
    2662             : #endif  // ENABLE_SLOW_DCHECKS
    2663             : 
    2664             : namespace {
    2665       67003 : bool MayContainRecordedSlots(HeapObject object) {
    2666             :   // New space objects do not have recorded slots.
    2667       67003 :   if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
    2668             :   // Whitelist objects that definitely do not have pointers.
    2669        7806 :   if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
    2670             :   // Conservatively return true for other objects.
    2671        3903 :   return true;
    2672             : }
    2673             : }  // namespace
    2674             : 
    2675      952435 : void Heap::OnMoveEvent(HeapObject target, HeapObject source,
    2676             :                        int size_in_bytes) {
    2677      952435 :   HeapProfiler* heap_profiler = isolate_->heap_profiler();
    2678      952435 :   if (heap_profiler->is_tracking_object_moves()) {
    2679             :     heap_profiler->ObjectMoveEvent(source->address(), target->address(),
    2680      136010 :                                    size_in_bytes);
    2681             :   }
    2682     1111170 :   for (auto& tracker : allocation_trackers_) {
    2683      312626 :     tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
    2684             :   }
    2685      954857 :   if (target->IsSharedFunctionInfo()) {
    2686        4343 :     LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
    2687             :                                                          target->address()));
    2688             :   }
    2689             : 
    2690             :   if (FLAG_verify_predictable) {
    2691             :     ++allocations_count_;
    2692             :     // Advance synthetic time by making a time request.
    2693             :     MonotonicallyIncreasingTimeInMs();
    2694             : 
    2695             :     UpdateAllocationsHash(source);
    2696             :     UpdateAllocationsHash(target);
    2697             :     UpdateAllocationsHash(size_in_bytes);
    2698             : 
    2699             :     if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
    2700             :       PrintAllocationsHash();
    2701             :     }
    2702      954528 :   } else if (FLAG_fuzzer_gc_analysis) {
    2703           0 :     ++allocations_count_;
    2704             :   }
    2705      954528 : }
    2706             : 
    2707        2169 : FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
    2708             :                                         int elements_to_trim) {
    2709        2169 :   if (elements_to_trim == 0) {
    2710             :     // This simplifies reasoning in the rest of the function.
    2711           0 :     return object;
    2712             :   }
    2713        2169 :   CHECK(!object.is_null());
    2714             :   DCHECK(CanMoveObjectStart(object));
    2715             :   // Add a custom visitor to the concurrent marker if a new left-trimmable
    2716             :   // type is added.
    2717             :   DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
    2718             :   const int element_size = object->IsFixedArray() ? kTaggedSize : kDoubleSize;
    2719        2169 :   const int bytes_to_trim = elements_to_trim * element_size;
    2720             :   Map map = object->map();
    2721             : 
    2722             :   // For now this trick is only applied to fixed arrays, which may be in new
    2723             :   // space or old space. In a large object space the object's start must
    2724             :   // coincide with the chunk start, so the trick is not applicable there.
    2725             :   DCHECK(!IsLargeObject(object));
    2726             :   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    2727             : 
    2728             :   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
    2729             :   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
    2730             :   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
    2731             : 
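                     :   // [Editor's note] Layout sketch (illustration only): trimming the first
                     :   // two elements of a FixedArray of length N.
                     :   //   before: old_start -> [map][length=N  ][e0][e1][e2]...[e(N-1)]
                     :   //   after:  old_start -> [two-word filler]
                     :   //           new_start -> [map][length=N-2][e2]...[e(N-1)]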
    2732             :   const int len = object->length();
    2733             :   DCHECK(elements_to_trim <= len);
    2734             : 
    2735             :   // Calculate location of new array start.
    2736             :   Address old_start = object->address();
    2737        2169 :   Address new_start = old_start + bytes_to_trim;
    2738             : 
    2739        2169 :   if (incremental_marking()->IsMarking()) {
    2740             :     incremental_marking()->NotifyLeftTrimming(
    2741         161 :         object, HeapObject::FromAddress(new_start));
    2742             :   }
    2743             : 
    2744             :   // Technically in new space this write might be omitted (except for
    2745             :   // debug mode, which iterates through the heap), but to play it safe
    2746             :   // we still do it.
    2747             :   HeapObject filler =
    2748        2169 :       CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
    2749             : 
    2750             :   // Initialize the header of the trimmed array. Since left trimming is only
    2751             :   // performed on pages which are not concurrently swept, creating a filler
    2752             :   // object does not require synchronization.
    2753        2169 :   RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
    2754        4338 :   RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
    2755             :                       Smi::FromInt(len - elements_to_trim));
    2756             : 
    2757             :   FixedArrayBase new_object =
    2758        2169 :       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
    2759             : 
    2760             :   // Remove recorded slots for the new map and length offset.
    2761             :   ClearRecordedSlot(new_object, new_object.RawField(0));
    2762             :   ClearRecordedSlot(new_object,
    2763             :                     new_object.RawField(FixedArrayBase::kLengthOffset));
    2764             : 
    2765             :   // Handle invalidated old-to-old slots.
    2766        2174 :   if (incremental_marking()->IsCompacting() &&
    2767           5 :       MayContainRecordedSlots(new_object)) {
    2768             :     // If the array was right-trimmed before, then it is registered in
    2769             :     // the invalidated_slots.
    2770             :     MemoryChunk::FromHeapObject(new_object)
    2771           5 :         ->MoveObjectWithInvalidatedSlots(filler, new_object);
    2772             :     // We have to clear slots in the free space to avoid stale old-to-old slots.
    2773             :     // Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
    2774             :     // we need pointer-granularity writes to avoid a race with the concurrent
    2775             :     // marker.
    2776           5 :     if (filler->Size() > FreeSpace::kSize) {
    2777           5 :       MemsetTagged(filler.RawField(FreeSpace::kSize),
    2778             :                    ReadOnlyRoots(this).undefined_value(),
    2779           5 :                    (filler->Size() - FreeSpace::kSize) / kTaggedSize);
    2780             :     }
    2781             :   }
    2782             :   // Notify the heap profiler of the change in object layout.
    2783        2169 :   OnMoveEvent(new_object, object, new_object->Size());
    2784             : 
    2785             : #ifdef ENABLE_SLOW_DCHECKS
    2786             :   if (FLAG_enable_slow_asserts) {
    2787             :     // Make sure the stack or other roots (e.g., Handles) don't contain pointers
    2788             :     // to the original FixedArray (which is now the filler object).
    2789             :     LeftTrimmerVerifierRootVisitor root_visitor(object);
    2790             :     ReadOnlyRoots(this).Iterate(&root_visitor);
    2791             :     IterateRoots(&root_visitor, VISIT_ALL);
    2792             :   }
    2793             : #endif  // ENABLE_SLOW_DCHECKS
    2794             : 
    2795        2169 :   return new_object;
    2796             : }
    2797             : 
    2798     1443797 : void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
    2799             :   const int len = object->length();
    2800             :   DCHECK_LE(elements_to_trim, len);
    2801             :   DCHECK_GE(elements_to_trim, 0);
    2802             : 
    2803             :   int bytes_to_trim;
    2804             :   DCHECK(!object->IsFixedTypedArrayBase());
    2805     1443797 :   if (object->IsByteArray()) {
    2806        5120 :     int new_size = ByteArray::SizeFor(len - elements_to_trim);
    2807        5120 :     bytes_to_trim = ByteArray::SizeFor(len) - new_size;
    2808             :     DCHECK_GE(bytes_to_trim, 0);
    2809     1438677 :   } else if (object->IsFixedArray()) {
    2810     1414317 :     CHECK_NE(elements_to_trim, len);
    2811     1414317 :     bytes_to_trim = elements_to_trim * kTaggedSize;
    2812             :   } else {
    2813             :     DCHECK(object->IsFixedDoubleArray());
    2814       24360 :     CHECK_NE(elements_to_trim, len);
    2815       24360 :     bytes_to_trim = elements_to_trim * kDoubleSize;
    2816             :   }
    2817             : 
    2818     1443797 :   CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
    2819     1443797 : }
    2820             : 
    2821       17399 : void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
    2822             :                                    int elements_to_trim) {
    2823             :   // This function is safe to use only at the end of the mark compact
    2824             :   // collection: When marking, we record the weak slots, and shrinking
    2825             :   // invalidates them.
    2826             :   DCHECK_EQ(gc_state(), MARK_COMPACT);
    2827       17399 :   CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
    2828       17399 :                                        elements_to_trim * kTaggedSize);
    2829       17399 : }
    2830             : 
    2831             : template <typename T>
    2832     1461196 : void Heap::CreateFillerForArray(T object, int elements_to_trim,
    2833             :                                 int bytes_to_trim) {
    2834             :   DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
    2835             :          object->IsWeakFixedArray());
    2836             : 
    2837             :   // For now this trick is only applied to objects in new and paged space.
    2838             :   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    2839             : 
    2840     1461196 :   if (bytes_to_trim == 0) {
    2841             :     DCHECK_EQ(elements_to_trim, 0);
    2842             :     // No need to create filler and update live bytes counters.
    2843             :     return;
    2844             :   }
    2845             : 
    2846             :   // Calculate location of new array end.
    2847     1461196 :   int old_size = object->Size();
    2848     1461196 :   Address old_end = object->address() + old_size;
    2849     1461196 :   Address new_end = old_end - bytes_to_trim;
    2850             : 
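                     :   // [Editor's note] Layout sketch (illustration only): trimming the last
                     :   // two elements of an array of length N.
                     :   //   before: [map][length=N  ][e0]...[e(N-3)][e(N-2)][e(N-1)]    <- old_end
                     :   //   after:  [map][length=N-2][e0]...[e(N-3)][two-element filler] <- old_end
                     :   //                                           ^ new_end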
    2851             :   // Register the array as an object with invalidated old-to-old slots. We
    2852             :   // cannot use NotifyObjectLayoutChange as it would mark the array black,
    2853             :   // which is not safe for left-trimming because left-trimming re-pushes
    2854             :   // only grey arrays onto the marking worklist.
    2855     1463197 :   if (incremental_marking()->IsCompacting() &&
    2856        2001 :       MayContainRecordedSlots(object)) {
    2857             :     // Ensure that the object survives because the InvalidatedSlotsFilter will
    2858             :     // compute its size from its map during pointers updating phase.
    2859          20 :     incremental_marking()->WhiteToGreyAndPush(object);
    2860          20 :     MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
    2861             :         object, old_size);
    2862             :   }
    2863             : 
    2864             :   // Technically in new space this write might be omitted (except for
    2865             :   // debug mode, which iterates through the heap), but to play it safe
    2866             :   // we still do it.
    2867             :   // We do not create a filler for objects in a large object space.
    2868     1461196 :   if (!IsLargeObject(object)) {
    2869             :     HeapObject filler =
    2870     1460582 :         CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
    2871             :     DCHECK(!filler.is_null());
    2872             :     // Clear the mark bits of the black area that now belongs to the filler.
    2873             :     // This is an optimization. The sweeper will release black fillers anyway.
    2874     1584944 :     if (incremental_marking()->black_allocation() &&
    2875             :         incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
    2876             :       Page* page = Page::FromAddress(new_end);
    2877         314 :       incremental_marking()->marking_state()->bitmap(page)->ClearRange(
    2878             :           page->AddressToMarkbitIndex(new_end),
    2879             :           page->AddressToMarkbitIndex(new_end + bytes_to_trim));
    2880             :     }
    2881             :   }
    2882             : 
    2883             :   // Initialize header of the trimmed array. We are storing the new length
    2884             :   // using a release store after creating a filler for the left-over space to
    2885             :   // avoid races with the sweeper thread.
    2886     1461196 :   object->synchronized_set_length(object->length() - elements_to_trim);
    2887             : 
    2888             :   // Notify the heap object allocation trackers of the change in object layout.
    2889             :   // The array may not be moved during GC, but its size still has to be adjusted.
    2890     1464052 :   for (auto& tracker : allocation_trackers_) {
    2891        5712 :     tracker->UpdateObjectSizeEvent(object->address(), object->Size());
    2892             :   }
    2893             : }
    2894             : 
    2895           0 : void Heap::MakeHeapIterable() {
    2896        7875 :   mark_compact_collector()->EnsureSweepingCompleted();
    2897           0 : }
    2898             : 
    2899             : 
    2900             : static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
    2901             :   const double kMinMutatorUtilization = 0.0;
    2902             :   const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
    2903       26131 :   if (mutator_speed == 0) return kMinMutatorUtilization;
    2904       23670 :   if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
    2905             :   // Derivation:
    2906             :   // mutator_utilization = mutator_time / (mutator_time + gc_time)
    2907             :   // mutator_time = 1 / mutator_speed
    2908             :   // gc_time = 1 / gc_speed
    2909             :   // mutator_utilization = (1 / mutator_speed) /
    2910             :   //                       (1 / mutator_speed + 1 / gc_speed)
    2911             :   // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
    2912       23670 :   return gc_speed / (mutator_speed + gc_speed);
    2913             : }
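
// Worked example with illustrative speeds (not from heap.cc itself): with an
// assumed mutator_speed of 100000 bytes/ms and gc_speed of 400000 bytes/ms,
// the formula above yields 400000 / (100000 + 400000) = 0.8, i.e. the mutator
// would get roughly 80% of the wall time if allocation work and GC work were
// interleaved back to back at those speeds.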
    2914             : 
    2915             : 
    2916       26119 : double Heap::YoungGenerationMutatorUtilization() {
    2917             :   double mutator_speed = static_cast<double>(
    2918       26119 :       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
    2919             :   double gc_speed =
    2920       26119 :       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
    2921             :   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
    2922       26119 :   if (FLAG_trace_mutator_utilization) {
    2923             :     isolate()->PrintWithTimestamp(
    2924             :         "Young generation mutator utilization = %.3f ("
    2925             :         "mutator_speed=%.f, gc_speed=%.f)\n",
    2926           0 :         result, mutator_speed, gc_speed);
    2927             :   }
    2928       26119 :   return result;
    2929             : }
    2930             : 
    2931             : 
    2932          12 : double Heap::OldGenerationMutatorUtilization() {
    2933             :   double mutator_speed = static_cast<double>(
    2934          12 :       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
    2935             :   double gc_speed = static_cast<double>(
    2936          12 :       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
    2937             :   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
    2938          12 :   if (FLAG_trace_mutator_utilization) {
    2939             :     isolate()->PrintWithTimestamp(
    2940             :         "Old generation mutator utilization = %.3f ("
    2941             :         "mutator_speed=%.f, gc_speed=%.f)\n",
    2942           0 :         result, mutator_speed, gc_speed);
    2943             :   }
    2944          12 :   return result;
    2945             : }
    2946             : 
    2947             : 
    2948           0 : bool Heap::HasLowYoungGenerationAllocationRate() {
    2949             :   const double high_mutator_utilization = 0.993;
    2950       26119 :   return YoungGenerationMutatorUtilization() > high_mutator_utilization;
    2951             : }
    2952             : 
    2953             : 
    2954           0 : bool Heap::HasLowOldGenerationAllocationRate() {
    2955             :   const double high_mutator_utilization = 0.993;
    2956          12 :   return OldGenerationMutatorUtilization() > high_mutator_utilization;
    2957             : }
    2958             : 
    2959             : 
    2960          21 : bool Heap::HasLowAllocationRate() {
    2961          33 :   return HasLowYoungGenerationAllocationRate() &&
    2962          21 :          HasLowOldGenerationAllocationRate();
    2963             : }
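
// Reading the 0.993 threshold above in terms of speeds (illustrative note,
// not from heap.cc itself): utilization > 0.993 is equivalent to
// mutator_speed < gc_speed * (1 - 0.993) / 0.993, i.e. roughly gc_speed / 142.
// Taking the conservative fallback gc_speed of 200000 bytes/ms from
// ComputeMutatorUtilization as an example value, the allocation rate counts
// as "low" below about 1410 bytes/ms (roughly 1.4 MB per second).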
    2964             : 
    2965           0 : bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
    2966             :                                     double mutator_utilization) {
    2967             :   const double kHighHeapPercentage = 0.8;
    2968             :   const double kLowMutatorUtilization = 0.4;
    2969       68127 :   return old_generation_size >=
    2970       68127 :              kHighHeapPercentage * max_old_generation_size_ &&
    2971           0 :          mutator_utilization < kLowMutatorUtilization;
    2972             : }
    2973             : 
    2974       68846 : void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
    2975             :                                        double mutator_utilization) {
    2976             :   const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
    2977       68846 :   if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
    2978       68127 :   if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
    2979       68127 :     consecutive_ineffective_mark_compacts_ = 0;
    2980       68127 :     return;
    2981             :   }
    2982           0 :   ++consecutive_ineffective_mark_compacts_;
    2983           0 :   if (consecutive_ineffective_mark_compacts_ ==
    2984             :       kMaxConsecutiveIneffectiveMarkCompacts) {
    2985           0 :     if (InvokeNearHeapLimitCallback()) {
    2986             :       // The callback increased the heap limit.
    2987           0 :       consecutive_ineffective_mark_compacts_ = 0;
    2988           0 :       return;
    2989             :     }
    2990           0 :     FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
    2991             :   }
    2992             : }
    2993             : 
    2994           0 : bool Heap::HasHighFragmentation() {
    2995           0 :   size_t used = OldGenerationSizeOfObjects();
    2996           0 :   size_t committed = CommittedOldGenerationMemory();
    2997           0 :   return HasHighFragmentation(used, committed);
    2998             : }
    2999             : 
    3000           0 : bool Heap::HasHighFragmentation(size_t used, size_t committed) {
    3001             :   const size_t kSlack = 16 * MB;
    3002             :   // Fragmentation is high if committed > 2 * used + kSlack.
    3003             :   // Rewrite the expression to avoid overflow.
    3004             :   DCHECK_GE(committed, used);
    3005       68489 :   return committed - used > used + kSlack;
    3006             : }
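
// Worked example for the rewrite above (illustrative sizes, not from heap.cc
// itself): the check is the inequality committed > 2 * used + kSlack with
// `used` subtracted from both sides. The DCHECK guarantees committed >= used,
// so committed - used cannot wrap around, whereas 2 * used + kSlack could
// overflow size_t for a very large heap. With used = 100 MB and
// committed = 220 MB: 220 > 2 * 100 + 16 and, equivalently,
// 220 - 100 = 120 > 100 + 16 = 116, so fragmentation is reported as high.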
    3007             : 
    3008     1845123 : bool Heap::ShouldOptimizeForMemoryUsage() {
    3009     1845123 :   const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
    3010     1845124 :   return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
    3011     5535365 :          isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
    3012     3690240 :          !CanExpandOldGeneration(kOldGenerationSlack);
    3013             : }
    3014             : 
    3015           0 : void Heap::ActivateMemoryReducerIfNeeded() {
    3016             :   // Activate the memory reducer when switching to the background if
    3017             :   // - there has been no mark-compact since the start, and
    3018             :   // - the committed memory can potentially be reduced.
    3019             :   // 2 pages for the old, code, and map space + 1 page for new space.
    3020             :   const int kMinCommittedMemory = 7 * Page::kPageSize;
    3021           0 :   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
    3022             :       isolate()->IsIsolateInBackground()) {
    3023             :     MemoryReducer::Event event;
    3024           0 :     event.type = MemoryReducer::kPossibleGarbage;
    3025           0 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    3026           0 :     memory_reducer_->NotifyPossibleGarbage(event);
    3027             :   }
    3028           0 : }
    3029             : 
    3030       94944 : void Heap::ReduceNewSpaceSize() {
    3031             :   // TODO(ulan): Unify this constant with the similar constant in
    3032             :   // GCIdleTimeHandler once the change is merged to 4.5.
    3033             :   static const size_t kLowAllocationThroughput = 1000;
    3034             :   const double allocation_throughput =
    3035       94944 :       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
    3036             : 
    3037       94944 :   if (FLAG_predictable) return;
    3038             : 
    3039      188932 :   if (ShouldReduceMemory() ||
    3040       72505 :       ((allocation_throughput != 0) &&
    3041             :        (allocation_throughput < kLowAllocationThroughput))) {
    3042       15933 :     new_space_->Shrink();
    3043       31866 :     new_lo_space_->SetCapacity(new_space_->Capacity());
    3044             :     UncommitFromSpace();
    3045             :   }
    3046             : }
    3047             : 
    3048       28674 : void Heap::FinalizeIncrementalMarkingIfComplete(
    3049             :     GarbageCollectionReason gc_reason) {
    3050       85295 :   if (incremental_marking()->IsMarking() &&
    3051       19798 :       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
    3052        7369 :        (!incremental_marking()->finalize_marking_completed() &&
    3053        7452 :         mark_compact_collector()->marking_worklist()->IsEmpty() &&
    3054          83 :         local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
    3055        8232 :     FinalizeIncrementalMarkingIncrementally(gc_reason);
    3056       48994 :   } else if (incremental_marking()->IsComplete() ||
    3057        8867 :              (mark_compact_collector()->marking_worklist()->IsEmpty() &&
    3058             :               local_embedder_heap_tracer()
    3059         757 :                   ->ShouldFinalizeIncrementalMarking())) {
    3060       13089 :     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
    3061             :   }
    3062       28674 : }
    3063             : 
    3064           5 : void Heap::FinalizeIncrementalMarkingAtomically(
    3065             :     GarbageCollectionReason gc_reason) {
    3066             :   DCHECK(!incremental_marking()->IsStopped());
    3067        2741 :   CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
    3068           5 : }
    3069             : 
    3070       19806 : void Heap::FinalizeIncrementalMarkingIncrementally(
    3071             :     GarbageCollectionReason gc_reason) {
    3072       19806 :   if (FLAG_trace_incremental_marking) {
    3073           0 :     isolate()->PrintWithTimestamp(
    3074             :         "[IncrementalMarking] (%s).\n",
    3075           0 :         Heap::GarbageCollectionReasonToString(gc_reason));
    3076             :   }
    3077             : 
    3078             :   HistogramTimerScope incremental_marking_scope(
    3079             :       isolate()->counters()->gc_incremental_marking_finalize());
    3080       59418 :   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
    3081       79224 :   TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
    3082             : 
    3083             :   {
    3084             :     GCCallbacksScope scope(this);
    3085       19806 :     if (scope.CheckReenter()) {
    3086             :       AllowHeapAllocation allow_allocation;
    3087       79224 :       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
    3088       39612 :       VMState<EXTERNAL> state(isolate_);
    3089       19806 :       HandleScope handle_scope(isolate_);
    3090       19806 :       CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    3091             :     }
    3092             :   }
    3093       19806 :   incremental_marking()->FinalizeIncrementally();
    3094             :   {
    3095             :     GCCallbacksScope scope(this);
    3096       19806 :     if (scope.CheckReenter()) {
    3097             :       AllowHeapAllocation allow_allocation;
    3098       79224 :       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
    3099       39612 :       VMState<EXTERNAL> state(isolate_);
    3100       19806 :       HandleScope handle_scope(isolate_);
    3101       19806 :       CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    3102             :     }
    3103             :   }
    3104       19806 : }
    3105             : 
    3106       91885 : void Heap::RegisterDeserializedObjectsForBlackAllocation(
    3107             :     Reservation* reservations, const std::vector<HeapObject>& large_objects,
    3108             :     const std::vector<Address>& maps) {
    3109             :   // TODO(ulan): pause black allocation during deserialization to avoid
    3110             :   // iterating all these objects in one go.
    3111             : 
    3112       91885 :   if (!incremental_marking()->black_allocation()) return;
    3113             : 
    3114             :   // Iterate black objects in old space, code space, map space, and large
    3115             :   // object space for side effects.
    3116             :   IncrementalMarking::MarkingState* marking_state =
    3117             :       incremental_marking()->marking_state();
    3118      121244 :   for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
    3119       53889 :     const Heap::Reservation& res = reservations[i];
    3120      444304 :     for (auto& chunk : res) {
    3121      390420 :       Address addr = chunk.start;
    3122    31848232 :       while (addr < chunk.end) {
    3123    15728911 :         HeapObject obj = HeapObject::FromAddress(addr);
    3124             :         // Objects can have any color because incremental marking can
    3125             :         // start in the middle of Heap::ReserveSpace().
    3126    15728911 :         if (marking_state->IsBlack(obj)) {
    3127    15726467 :           incremental_marking()->ProcessBlackAllocatedObject(obj);
    3128             :         }
    3129    15728921 :         addr += obj->Size();
    3130             :       }
    3131             :     }
    3132             :   }
    3133             : 
    3134             :   // Large object space doesn't use reservations, so it needs custom handling.
    3135       13491 :   for (HeapObject object : large_objects) {
    3136          20 :     incremental_marking()->ProcessBlackAllocatedObject(object);
    3137             :   }
    3138             : 
    3139             :   // Map space doesn't use reservations, so it needs custom handling.
    3140     3357533 :   for (Address addr : maps) {
    3141             :     incremental_marking()->ProcessBlackAllocatedObject(
    3142     3344064 :         HeapObject::FromAddress(addr));
    3143             :   }
    3144             : }
    3145             : 
    3146    31822479 : void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
    3147             :                                     const DisallowHeapAllocation&) {
    3148    31822479 :   if (incremental_marking()->IsMarking()) {
    3149     3305031 :     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
    3150     3370028 :     if (incremental_marking()->IsCompacting() &&
    3151       64997 :         MayContainRecordedSlots(object)) {
    3152             :       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
    3153        3878 :           object, size);
    3154             :     }
    3155             :   }
    3156             : #ifdef VERIFY_HEAP
    3157             :   if (FLAG_verify_heap) {
    3158             :     DCHECK(pending_layout_change_object_.is_null());
    3159             :     pending_layout_change_object_ = object;
    3160             :   }
    3161             : #endif
    3162    31822479 : }
    3163             : 
    3164             : #ifdef VERIFY_HEAP
    3165             : // Helper class for collecting slot addresses.
    3166             : class SlotCollectingVisitor final : public ObjectVisitor {
    3167             :  public:
    3168             :   void VisitPointers(HeapObject host, ObjectSlot start,
    3169             :                      ObjectSlot end) override {
    3170             :     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    3171             :   }
    3172             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3173             :                      MaybeObjectSlot end) final {
    3174             :     for (MaybeObjectSlot p = start; p < end; ++p) {
    3175             :       slots_.push_back(p);
    3176             :     }
    3177             :   }
    3178             : 
    3179             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
    3180             : 
    3181             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3182             :     UNREACHABLE();
    3183             :   }
    3184             : 
    3185             :   int number_of_slots() { return static_cast<int>(slots_.size()); }
    3186             : 
    3187             :   MaybeObjectSlot slot(int i) { return slots_[i]; }
    3188             : 
    3189             :  private:
    3190             :   std::vector<MaybeObjectSlot> slots_;
    3191             : };
    3192             : 
    3193             : void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
    3194             :   if (!FLAG_verify_heap) return;
    3195             : 
    3196             :   // Check that Heap::NotifyObjectLayoutChange was called for object
    3197             :   // transitions that are not safe for concurrent marking.
    3198             :   // If you see this check triggering for a freshly allocated object,
    3199             :   // use object->set_map_after_allocation() to initialize its map.
    3200             :   if (pending_layout_change_object_.is_null()) {
    3201             :     if (object->IsJSObject()) {
    3202             :       DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
    3203             :     } else {
    3204             :       // Check that the set of slots before and after the transition match.
    3205             :       SlotCollectingVisitor old_visitor;
    3206             :       object->IterateFast(&old_visitor);
    3207             :       MapWord old_map_word = object->map_word();
    3208             :       // Temporarily set the new map to iterate new slots.
    3209             :       object->set_map_word(MapWord::FromMap(new_map));
    3210             :       SlotCollectingVisitor new_visitor;
    3211             :       object->IterateFast(&new_visitor);
    3212             :       // Restore the old map.
    3213             :       object->set_map_word(old_map_word);
    3214             :       DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
    3215             :       for (int i = 0; i < new_visitor.number_of_slots(); i++) {
    3216             :         DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
    3217             :       }
    3218             :     }
    3219             :   } else {
    3220             :     DCHECK_EQ(pending_layout_change_object_, object);
    3221             :     pending_layout_change_object_ = HeapObject();
    3222             :   }
    3223             : }
    3224             : #endif
    3225             : 
    3226         479 : GCIdleTimeHeapState Heap::ComputeHeapState() {
    3227             :   GCIdleTimeHeapState heap_state;
    3228         479 :   heap_state.contexts_disposed = contexts_disposed_;
    3229             :   heap_state.contexts_disposal_rate =
    3230         479 :       tracer()->ContextDisposalRateInMilliseconds();
    3231         479 :   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
    3232         479 :   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
    3233         479 :   return heap_state;
    3234             : }
    3235             : 
    3236             : 
    3237         479 : bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
    3238             :                                  GCIdleTimeHeapState heap_state,
    3239             :                                  double deadline_in_ms) {
    3240             :   bool result = false;
    3241         479 :   switch (action) {
    3242             :     case GCIdleTimeAction::kDone:
    3243             :       result = true;
    3244         245 :       break;
    3245             :     case GCIdleTimeAction::kIncrementalStep: {
    3246             :       incremental_marking()->AdvanceWithDeadline(
    3247             :           deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
    3248          50 :           StepOrigin::kTask);
    3249             :       FinalizeIncrementalMarkingIfComplete(
    3250          50 :           GarbageCollectionReason::kFinalizeMarkingViaTask);
    3251             :       result = incremental_marking()->IsStopped();
    3252          50 :       break;
    3253             :     }
    3254             :     case GCIdleTimeAction::kFullGC: {
    3255             :       DCHECK_LT(0, contexts_disposed_);
    3256         184 :       HistogramTimerScope scope(isolate_->counters()->gc_context());
    3257         552 :       TRACE_EVENT0("v8", "V8.GCContext");
    3258             :       CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
    3259             :       break;
    3260             :     }
    3261             :   }
    3262             : 
    3263         479 :   return result;
    3264             : }
    3265             : 
    3266         479 : void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
    3267             :                                     GCIdleTimeHeapState heap_state,
    3268             :                                     double start_ms, double deadline_in_ms) {
    3269         479 :   double idle_time_in_ms = deadline_in_ms - start_ms;
    3270             :   double current_time = MonotonicallyIncreasingTimeInMs();
    3271         479 :   last_idle_notification_time_ = current_time;
    3272         479 :   double deadline_difference = deadline_in_ms - current_time;
    3273             : 
    3274         479 :   contexts_disposed_ = 0;
    3275             : 
    3276         479 :   if (FLAG_trace_idle_notification) {
    3277           0 :     isolate_->PrintWithTimestamp(
    3278             :         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
    3279             :         "ms, deadline usage %.2f ms [",
    3280             :         idle_time_in_ms, idle_time_in_ms - deadline_difference,
    3281           0 :         deadline_difference);
    3282           0 :     switch (action) {
    3283             :       case GCIdleTimeAction::kDone:
    3284           0 :         PrintF("done");
    3285           0 :         break;
    3286             :       case GCIdleTimeAction::kIncrementalStep:
    3287           0 :         PrintF("incremental step");
    3288           0 :         break;
    3289             :       case GCIdleTimeAction::kFullGC:
    3290           0 :         PrintF("full GC");
    3291           0 :         break;
    3292             :     }
    3293           0 :     PrintF("]");
    3294           0 :     if (FLAG_trace_idle_notification_verbose) {
    3295           0 :       PrintF("[");
    3296           0 :       heap_state.Print();
    3297           0 :       PrintF("]");
    3298             :     }
    3299           0 :     PrintF("\n");
    3300             :   }
    3301         479 : }
    3302             : 
    3303             : 
    3304    26541707 : double Heap::MonotonicallyIncreasingTimeInMs() {
    3305    29576060 :   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
    3306    29578263 :          static_cast<double>(base::Time::kMillisecondsPerSecond);
    3307             : }
    3308             : 
    3309             : 
    3310           0 : bool Heap::IdleNotification(int idle_time_in_ms) {
    3311           0 :   return IdleNotification(
    3312           0 :       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
    3313           0 :       (static_cast<double>(idle_time_in_ms) /
    3314           0 :        static_cast<double>(base::Time::kMillisecondsPerSecond)));
    3315             : }
    3316             : 
    3317             : 
    3318         479 : bool Heap::IdleNotification(double deadline_in_seconds) {
    3319         479 :   CHECK(HasBeenSetUp());
    3320             :   double deadline_in_ms =
    3321             :       deadline_in_seconds *
    3322         479 :       static_cast<double>(base::Time::kMillisecondsPerSecond);
    3323             :   HistogramTimerScope idle_notification_scope(
    3324         479 :       isolate_->counters()->gc_idle_notification());
    3325        1437 :   TRACE_EVENT0("v8", "V8.GCIdleNotification");
    3326             :   double start_ms = MonotonicallyIncreasingTimeInMs();
    3327         479 :   double idle_time_in_ms = deadline_in_ms - start_ms;
    3328             : 
    3329             :   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
    3330         479 :                              OldGenerationAllocationCounter());
    3331             : 
    3332         479 :   GCIdleTimeHeapState heap_state = ComputeHeapState();
    3333             : 
    3334             :   GCIdleTimeAction action =
    3335         479 :       gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
    3336             : 
    3337         479 :   bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
    3338             : 
    3339         479 :   IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
    3340         479 :   return result;
    3341             : }
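
// Illustrative embedder-side sketch (not part of heap.cc): embedders normally
// reach this code through v8::Isolate::IdleNotificationDeadline, passing an
// absolute deadline in seconds on the platform's monotonic clock. A minimal
// sketch with hypothetical helper names; include paths depend on the
// embedder's build setup:
#include "v8.h"
#include "v8-platform.h"

bool SketchDoIdleWork(v8::Isolate* isolate, v8::Platform* platform,
                      double idle_budget_in_seconds) {
  // Heap::IdleNotification converts the deadline to milliseconds internally.
  double deadline_in_seconds =
      platform->MonotonicallyIncreasingTime() + idle_budget_in_seconds;
  // The return value signals whether the embedder may stop sending idle
  // notifications for now.
  return isolate->IdleNotificationDeadline(deadline_in_seconds);
}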
    3342             : 
    3343             : 
    3344           0 : bool Heap::RecentIdleNotificationHappened() {
    3345           0 :   return (last_idle_notification_time_ +
    3346             :           GCIdleTimeHandler::kMaxScheduledIdleTime) >
    3347           0 :          MonotonicallyIncreasingTimeInMs();
    3348             : }
    3349             : 
    3350             : class MemoryPressureInterruptTask : public CancelableTask {
    3351             :  public:
    3352             :   explicit MemoryPressureInterruptTask(Heap* heap)
    3353          11 :       : CancelableTask(heap->isolate()), heap_(heap) {}
    3354             : 
    3355          22 :   ~MemoryPressureInterruptTask() override = default;
    3356             : 
    3357             :  private:
    3358             :   // v8::internal::CancelableTask overrides.
    3359          11 :   void RunInternal() override { heap_->CheckMemoryPressure(); }
    3360             : 
    3361             :   Heap* heap_;
    3362             :   DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
    3363             : };
    3364             : 
    3365     1846139 : void Heap::CheckMemoryPressure() {
    3366     1846139 :   if (HighMemoryPressure()) {
    3367             :     // The optimizing compiler may be unnecessarily holding on to memory.
    3368        2516 :     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    3369             :   }
    3370             :   MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
    3371             :   // Reset the memory pressure level to avoid recursive GCs triggered by
    3372             :   // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
    3373             :   // the finalizers.
    3374             :   memory_pressure_level_ = MemoryPressureLevel::kNone;
    3375     1846139 :   if (memory_pressure_level == MemoryPressureLevel::kCritical) {
    3376        2516 :     CollectGarbageOnMemoryPressure();
    3377     1843623 :   } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
    3378           0 :     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
    3379             :       StartIncrementalMarking(kReduceMemoryFootprintMask,
    3380             :                               GarbageCollectionReason::kMemoryPressure);
    3381             :     }
    3382             :   }
    3383     1846139 :   if (memory_reducer_) {
    3384             :     MemoryReducer::Event event;
    3385     1846139 :     event.type = MemoryReducer::kPossibleGarbage;
    3386     1846139 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    3387     1846139 :     memory_reducer_->NotifyPossibleGarbage(event);
    3388             :   }
    3389     1846139 : }
    3390             : 
    3391        2516 : void Heap::CollectGarbageOnMemoryPressure() {
    3392             :   const int kGarbageThresholdInBytes = 8 * MB;
    3393             :   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
    3394             :   // This constant is the maximum response time in the RAIL performance model.
    3395             :   const double kMaxMemoryPressurePauseMs = 100;
    3396             : 
    3397             :   double start = MonotonicallyIncreasingTimeInMs();
    3398             :   CollectAllGarbage(kReduceMemoryFootprintMask,
    3399             :                     GarbageCollectionReason::kMemoryPressure,
    3400             :                     kGCCallbackFlagCollectAllAvailableGarbage);
    3401        2516 :   EagerlyFreeExternalMemory();
    3402             :   double end = MonotonicallyIncreasingTimeInMs();
    3403             : 
    3404             :   // Estimate how much memory we can free.
    3405        7548 :   int64_t potential_garbage = (CommittedMemory() - SizeOfObjects()) +
    3406        5032 :                               isolate()->isolate_data()->external_memory_;
    3407             :   // If we can potentially free large amount of memory, then start GC right
    3408             :   // away instead of waiting for memory reducer.
    3409        2982 :   if (potential_garbage >= kGarbageThresholdInBytes &&
    3410         466 :       potential_garbage >=
    3411         466 :           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
    3412             :     // If we spent less than half of the time budget, then perform a full
    3413             :     // GC. Otherwise, start incremental marking.
    3414         466 :     if (end - start < kMaxMemoryPressurePauseMs / 2) {
    3415             :       CollectAllGarbage(kReduceMemoryFootprintMask,
    3416             :                         GarbageCollectionReason::kMemoryPressure,
    3417             :                         kGCCallbackFlagCollectAllAvailableGarbage);
    3418             :     } else {
    3419           0 :       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
    3420             :         StartIncrementalMarking(kReduceMemoryFootprintMask,
    3421             :                                 GarbageCollectionReason::kMemoryPressure);
    3422             :       }
    3423             :     }
    3424             :   }
    3425        2516 : }
    3426             : 
    3427        2526 : void Heap::MemoryPressureNotification(MemoryPressureLevel level,
    3428             :                                       bool is_isolate_locked) {
    3429             :   MemoryPressureLevel previous = memory_pressure_level_;
    3430             :   memory_pressure_level_ = level;
    3431        5052 :   if ((previous != MemoryPressureLevel::kCritical &&
    3432        2536 :        level == MemoryPressureLevel::kCritical) ||
    3433          20 :       (previous == MemoryPressureLevel::kNone &&
    3434          10 :        level == MemoryPressureLevel::kModerate)) {
    3435        2521 :     if (is_isolate_locked) {
    3436        2510 :       CheckMemoryPressure();
    3437             :     } else {
    3438             :       ExecutionAccess access(isolate());
    3439             :       isolate()->stack_guard()->RequestGC();
    3440          11 :       auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
    3441          11 :           reinterpret_cast<v8::Isolate*>(isolate()));
    3442          11 :       taskrunner->PostTask(
    3443          33 :           base::make_unique<MemoryPressureInterruptTask>(this));
    3444             :     }
    3445             :   }
    3446        2526 : }
    3447             : 
    3448        3781 : void Heap::EagerlyFreeExternalMemory() {
    3449       19304 :   for (Page* page : *old_space()) {
    3450       15523 :     if (!page->SweepingDone()) {
    3451             :       base::MutexGuard guard(page->mutex());
    3452        1080 :       if (!page->SweepingDone()) {
    3453             :         ArrayBufferTracker::FreeDead(
    3454         454 :             page, mark_compact_collector()->non_atomic_marking_state());
    3455             :       }
    3456             :     }
    3457             :   }
    3458        3781 :   memory_allocator()->unmapper()->EnsureUnmappingCompleted();
    3459        3781 : }
    3460             : 
    3461        3439 : void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
    3462             :                                     void* data) {
    3463             :   const size_t kMaxCallbacks = 100;
    3464        3439 :   CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
    3465        3439 :   for (auto callback_data : near_heap_limit_callbacks_) {
    3466           0 :     CHECK_NE(callback_data.first, callback);
    3467             :   }
    3468        6878 :   near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
    3469        3439 : }
    3470             : 
    3471        3431 : void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
    3472             :                                        size_t heap_limit) {
    3473        3431 :   for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
    3474        3431 :     if (near_heap_limit_callbacks_[i].first == callback) {
    3475             :       near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
    3476        3431 :       if (heap_limit) {
    3477           5 :         RestoreHeapLimit(heap_limit);
    3478             :       }
    3479        3431 :       return;
    3480             :     }
    3481             :   }
    3482           0 :   UNREACHABLE();
    3483             : }
    3484             : 
    3485           4 : void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
    3486             :   initial_max_old_generation_size_threshold_ =
    3487           4 :       initial_max_old_generation_size_ * threshold_percent;
    3488           4 : }
    3489             : 
    3490          79 : bool Heap::InvokeNearHeapLimitCallback() {
    3491          79 :   if (near_heap_limit_callbacks_.size() > 0) {
    3492             :     HandleScope scope(isolate());
    3493             :     v8::NearHeapLimitCallback callback =
    3494          22 :         near_heap_limit_callbacks_.back().first;
    3495          22 :     void* data = near_heap_limit_callbacks_.back().second;
    3496          22 :     size_t heap_limit = callback(data, max_old_generation_size_,
    3497          22 :                                  initial_max_old_generation_size_);
    3498          22 :     if (heap_limit > max_old_generation_size_) {
    3499          22 :       max_old_generation_size_ = heap_limit;
    3500             :       return true;
    3501             :     }
    3502             :   }
    3503             :   return false;
    3504             : }
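
// Illustrative embedder-side sketch (not part of heap.cc): the callbacks
// polled above are installed through v8::Isolate::AddNearHeapLimitCallback.
// As InvokeNearHeapLimitCallback shows, returning a value larger than the
// current limit raises max_old_generation_size_ and averts the OOM, while
// returning the current limit leaves it unchanged. A minimal sketch with a
// hypothetical policy of granting an extra 64 MB per invocation:
#include "v8.h"

size_t SketchRaiseHeapLimit(void* /* data */, size_t current_heap_limit,
                            size_t /* initial_heap_limit */) {
  // Illustrative policy only: grant 64 MB each time V8 approaches the limit.
  return current_heap_limit + 64 * 1024 * 1024;
}

void SketchInstallNearHeapLimitCallback(v8::Isolate* isolate) {
  isolate->AddNearHeapLimitCallback(SketchRaiseHeapLimit, nullptr);
}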
    3505             : 
    3506           0 : void Heap::CollectCodeStatistics() {
    3507           0 :   TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
    3508           0 :   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
    3509             :   // We do not look for code in new space or map space. If code
    3510             :   // somehow ends up in those spaces, we would miss it here.
    3511           0 :   CodeStatistics::CollectCodeStatistics(code_space_, isolate());
    3512           0 :   CodeStatistics::CollectCodeStatistics(old_space_, isolate());
    3513           0 :   CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
    3514           0 : }
    3515             : 
    3516             : #ifdef DEBUG
    3517             : 
    3518             : void Heap::Print() {
    3519             :   if (!HasBeenSetUp()) return;
    3520             :   isolate()->PrintStack(stdout);
    3521             : 
    3522             :   for (SpaceIterator it(this); it.has_next();) {
    3523             :     it.next()->Print();
    3524             :   }
    3525             : }
    3526             : 
    3527             : 
    3528             : void Heap::ReportCodeStatistics(const char* title) {
    3529             :   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
    3530             :   CollectCodeStatistics();
    3531             :   CodeStatistics::ReportCodeStatistics(isolate());
    3532             : }
    3533             : 
    3534             : #endif  // DEBUG
    3535             : 
    3536       94929 : const char* Heap::GarbageCollectionReasonToString(
    3537             :     GarbageCollectionReason gc_reason) {
    3538       94929 :   switch (gc_reason) {
    3539             :     case GarbageCollectionReason::kAllocationFailure:
    3540             :       return "allocation failure";
    3541             :     case GarbageCollectionReason::kAllocationLimit:
    3542           0 :       return "allocation limit";
    3543             :     case GarbageCollectionReason::kContextDisposal:
    3544         184 :       return "context disposal";
    3545             :     case GarbageCollectionReason::kCountersExtension:
    3546           0 :       return "counters extension";
    3547             :     case GarbageCollectionReason::kDebugger:
    3548       14380 :       return "debugger";
    3549             :     case GarbageCollectionReason::kDeserializer:
    3550           5 :       return "deserialize";
    3551             :     case GarbageCollectionReason::kExternalMemoryPressure:
    3552         833 :       return "external memory pressure";
    3553             :     case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
    3554        6808 :       return "finalize incremental marking via stack guard";
    3555             :     case GarbageCollectionReason::kFinalizeMarkingViaTask:
    3556       13089 :       return "finalize incremental marking via task";
    3557             :     case GarbageCollectionReason::kFullHashtable:
    3558           0 :       return "full hash-table";
    3559             :     case GarbageCollectionReason::kHeapProfiler:
    3560        1140 :       return "heap profiler";
    3561             :     case GarbageCollectionReason::kIdleTask:
    3562        1769 :       return "idle task";
    3563             :     case GarbageCollectionReason::kLastResort:
    3564          10 :       return "last resort";
    3565             :     case GarbageCollectionReason::kLowMemoryNotification:
    3566        1081 :       return "low memory notification";
    3567             :     case GarbageCollectionReason::kMakeHeapIterable:
    3568           0 :       return "make heap iterable";
    3569             :     case GarbageCollectionReason::kMemoryPressure:
    3570        2982 :       return "memory pressure";
    3571             :     case GarbageCollectionReason::kMemoryReducer:
    3572           0 :       return "memory reducer";
    3573             :     case GarbageCollectionReason::kRuntime:
    3574         336 :       return "runtime";
    3575             :     case GarbageCollectionReason::kSamplingProfiler:
    3576          20 :       return "sampling profiler";
    3577             :     case GarbageCollectionReason::kSnapshotCreator:
    3578         392 :       return "snapshot creator";
    3579             :     case GarbageCollectionReason::kTesting:
    3580       28999 :       return "testing";
    3581             :     case GarbageCollectionReason::kExternalFinalize:
    3582           5 :       return "external finalize";
    3583             :     case GarbageCollectionReason::kUnknown:
    3584           5 :       return "unknown";
    3585             :   }
    3586           0 :   UNREACHABLE();
    3587             : }
    3588             : 
    3589     1934474 : bool Heap::Contains(HeapObject value) {
    3590             :   // Check RO_SPACE first because IsOutsideAllocatedSpace cannot account for a
    3591             :   // shared RO_SPACE.
    3592             :   // TODO(goszczycki): Exclude read-only space. Use ReadOnlyHeap::Contains where
    3593             :   // appropriate.
    3594     3868948 :   if (read_only_space_ != nullptr && read_only_space_->Contains(value)) {
    3595             :     return true;
    3596             :   }
    3597     1934474 :   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    3598             :     return false;
    3599             :   }
    3600     3868948 :   return HasBeenSetUp() &&
    3601     3838107 :          (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
    3602          20 :           code_space_->Contains(value) || map_space_->Contains(value) ||
    3603           5 :           lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
    3604           0 :           new_lo_space_->Contains(value));
    3605             : }
    3606             : 
    3607          70 : bool Heap::InSpace(HeapObject value, AllocationSpace space) {
    3608          70 :   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    3609             :     return false;
    3610             :   }
    3611          70 :   if (!HasBeenSetUp()) return false;
    3612             : 
    3613          70 :   switch (space) {
    3614             :     case NEW_SPACE:
    3615          15 :       return new_space_->ToSpaceContains(value);
    3616             :     case OLD_SPACE:
    3617          30 :       return old_space_->Contains(value);
    3618             :     case CODE_SPACE:
    3619           0 :       return code_space_->Contains(value);
    3620             :     case MAP_SPACE:
    3621           0 :       return map_space_->Contains(value);
    3622             :     case LO_SPACE:
    3623          30 :       return lo_space_->Contains(value);
    3624             :     case CODE_LO_SPACE:
    3625          10 :       return code_lo_space_->Contains(value);
    3626             :     case NEW_LO_SPACE:
    3627           0 :       return new_lo_space_->Contains(value);
    3628             :     case RO_SPACE:
    3629           0 :       return read_only_space_->Contains(value);
    3630             :   }
    3631           0 :   UNREACHABLE();
    3632             : }
    3633             : 
    3634           0 : bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
    3635           0 :   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
    3636             :     return false;
    3637             :   }
    3638           0 :   if (!HasBeenSetUp()) return false;
    3639             : 
    3640           0 :   switch (space) {
    3641             :     case NEW_SPACE:
    3642           0 :       return new_space_->ToSpaceContainsSlow(addr);
    3643             :     case OLD_SPACE:
    3644           0 :       return old_space_->ContainsSlow(addr);
    3645             :     case CODE_SPACE:
    3646           0 :       return code_space_->ContainsSlow(addr);
    3647             :     case MAP_SPACE:
    3648           0 :       return map_space_->ContainsSlow(addr);
    3649             :     case LO_SPACE:
    3650           0 :       return lo_space_->ContainsSlow(addr);
    3651             :     case CODE_LO_SPACE:
    3652           0 :       return code_lo_space_->ContainsSlow(addr);
    3653             :     case NEW_LO_SPACE:
    3654           0 :       return new_lo_space_->ContainsSlow(addr);
    3655             :     case RO_SPACE:
    3656           0 :       return read_only_space_->ContainsSlow(addr);
    3657             :   }
    3658           0 :   UNREACHABLE();
    3659             : }
    3660             : 
    3661          40 : bool Heap::IsValidAllocationSpace(AllocationSpace space) {
    3662          40 :   switch (space) {
    3663             :     case NEW_SPACE:
    3664             :     case OLD_SPACE:
    3665             :     case CODE_SPACE:
    3666             :     case MAP_SPACE:
    3667             :     case LO_SPACE:
    3668             :     case NEW_LO_SPACE:
    3669             :     case CODE_LO_SPACE:
    3670             :     case RO_SPACE:
    3671             :       return true;
    3672             :     default:
    3673           0 :       return false;
    3674             :   }
    3675             : }
    3676             : 
    3677             : #ifdef VERIFY_HEAP
    3678             : class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
    3679             :  public:
    3680             :   explicit VerifyReadOnlyPointersVisitor(Heap* heap)
    3681             :       : VerifyPointersVisitor(heap) {}
    3682             : 
    3683             :  protected:
    3684             :   void VerifyPointers(HeapObject host, MaybeObjectSlot start,
    3685             :                       MaybeObjectSlot end) override {
    3686             :     if (!host.is_null()) {
    3687             :       CHECK(ReadOnlyHeap::Contains(host->map()));
    3688             :     }
    3689             :     VerifyPointersVisitor::VerifyPointers(host, start, end);
    3690             : 
    3691             :     for (MaybeObjectSlot current = start; current < end; ++current) {
    3692             :       HeapObject heap_object;
    3693             :       if ((*current)->GetHeapObject(&heap_object)) {
    3694             :         CHECK(ReadOnlyHeap::Contains(heap_object));
    3695             :       }
    3696             :     }
    3697             :   }
    3698             : };
    3699             : 
    3700             : void Heap::Verify() {
    3701             :   CHECK(HasBeenSetUp());
    3702             :   HandleScope scope(isolate());
    3703             : 
    3704             :   // We have to wait here for the sweeper threads to have an iterable heap.
    3705             :   mark_compact_collector()->EnsureSweepingCompleted();
    3706             : 
    3707             :   VerifyPointersVisitor visitor(this);
    3708             :   IterateRoots(&visitor, VISIT_ONLY_STRONG);
    3709             : 
    3710             :   if (!isolate()->context().is_null() &&
    3711             :       !isolate()->normalized_map_cache()->IsUndefined(isolate())) {
    3712             :     NormalizedMapCache::cast(*isolate()->normalized_map_cache())
    3713             :         ->NormalizedMapCacheVerify(isolate());
    3714             :   }
    3715             : 
    3716             :   VerifySmisVisitor smis_visitor;
    3717             :   IterateSmiRoots(&smis_visitor);
    3718             : 
    3719             :   new_space_->Verify(isolate());
    3720             : 
    3721             :   old_space_->Verify(isolate(), &visitor);
    3722             :   map_space_->Verify(isolate(), &visitor);
    3723             : 
    3724             :   VerifyPointersVisitor no_dirty_regions_visitor(this);
    3725             :   code_space_->Verify(isolate(), &no_dirty_regions_visitor);
    3726             : 
    3727             :   lo_space_->Verify(isolate());
    3728             :   code_lo_space_->Verify(isolate());
    3729             :   new_lo_space_->Verify(isolate());
    3730             : 
    3731             :   VerifyReadOnlyPointersVisitor read_only_visitor(this);
    3732             :   read_only_space_->Verify(isolate(), &read_only_visitor);
    3733             : }
    3734             : 
    3735             : class SlotVerifyingVisitor : public ObjectVisitor {
    3736             :  public:
    3737             :   SlotVerifyingVisitor(std::set<Address>* untyped,
    3738             :                        std::set<std::pair<SlotType, Address> >* typed)
    3739             :       : untyped_(untyped), typed_(typed) {}
    3740             : 
    3741             :   virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
    3742             : 
    3743             :   void VisitPointers(HeapObject host, ObjectSlot start,
    3744             :                      ObjectSlot end) override {
    3745             : #ifdef DEBUG
    3746             :     for (ObjectSlot slot = start; slot < end; ++slot) {
    3747             :       DCHECK(!HasWeakHeapObjectTag(*slot));
    3748             :     }
    3749             : #endif  // DEBUG
    3750             :     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    3751             :   }
    3752             : 
    3753             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3754             :                      MaybeObjectSlot end) final {
    3755             :     for (MaybeObjectSlot slot = start; slot < end; ++slot) {
    3756             :       if (ShouldHaveBeenRecorded(host, *slot)) {
    3757             :         CHECK_GT(untyped_->count(slot.address()), 0);
    3758             :       }
    3759             :     }
    3760             :   }
    3761             : 
    3762             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    3763             :     Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    3764             :     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
    3765             :       CHECK(
    3766             :           InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
    3767             :           (rinfo->IsInConstantPool() &&
    3768             :            InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
    3769             :     }
    3770             :   }
    3771             : 
    3772             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3773             :     Object target = rinfo->target_object();
    3774             :     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
    3775             :       CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
    3776             :             (rinfo->IsInConstantPool() &&
    3777             :              InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
    3778             :     }
    3779             :   }
    3780             : 
    3781             :  protected:
    3782             :   bool InUntypedSet(ObjectSlot slot) {
    3783             :     return untyped_->count(slot.address()) > 0;
    3784             :   }
    3785             : 
    3786             :  private:
    3787             :   bool InTypedSet(SlotType type, Address slot) {
    3788             :     return typed_->count(std::make_pair(type, slot)) > 0;
    3789             :   }
    3790             :   std::set<Address>* untyped_;
    3791             :   std::set<std::pair<SlotType, Address> >* typed_;
    3792             : };
    3793             : 
    3794             : class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
    3795             :  public:
    3796             :   OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
    3797             :                                std::set<std::pair<SlotType, Address>>* typed,
    3798             :                                EphemeronRememberedSet* ephemeron_remembered_set)
    3799             :       : SlotVerifyingVisitor(untyped, typed),
    3800             :         ephemeron_remembered_set_(ephemeron_remembered_set) {}
    3801             : 
    3802             :   bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
    3803             :     DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InYoungGeneration(target),
    3804             :                    Heap::InToPage(target));
    3805             :     return target->IsStrongOrWeak() && Heap::InYoungGeneration(target) &&
    3806             :            !Heap::InYoungGeneration(host);
    3807             :   }
    3808             : 
    3809             :   void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
    3810             :                       ObjectSlot target) override {
    3811             :     VisitPointer(host, target);
    3812             :     if (FLAG_minor_mc) {
    3813             :       VisitPointer(host, target);
    3814             :     } else {
    3815             :       // Keys are handled separately and should never appear in this set.
    3816             :       CHECK(!InUntypedSet(key));
    3817             :       Object k = *key;
    3818             :       if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
    3819             :         EphemeronHashTable table = EphemeronHashTable::cast(host);
    3820             :         auto it = ephemeron_remembered_set_->find(table);
    3821             :         CHECK(it != ephemeron_remembered_set_->end());
    3822             :         int slot_index =
    3823             :             EphemeronHashTable::SlotToIndex(table.address(), key.address());
    3824             :         int entry = EphemeronHashTable::IndexToEntry(slot_index);
    3825             :         CHECK(it->second.find(entry) != it->second.end());
    3826             :       }
    3827             :     }
    3828             :   }
    3829             : 
    3830             :  private:
    3831             :   EphemeronRememberedSet* ephemeron_remembered_set_;
    3832             : };
    3833             : 
    3834             : template <RememberedSetType direction>
    3835             : void CollectSlots(MemoryChunk* chunk, Address start, Address end,
    3836             :                   std::set<Address>* untyped,
    3837             :                   std::set<std::pair<SlotType, Address> >* typed) {
    3838             :   RememberedSet<direction>::Iterate(
    3839             :       chunk,
    3840             :       [start, end, untyped](MaybeObjectSlot slot) {
    3841             :         if (start <= slot.address() && slot.address() < end) {
    3842             :           untyped->insert(slot.address());
    3843             :         }
    3844             :         return KEEP_SLOT;
    3845             :       },
    3846             :       SlotSet::PREFREE_EMPTY_BUCKETS);
    3847             :   RememberedSet<direction>::IterateTyped(
    3848             :       chunk, [=](SlotType type, Address slot) {
    3849             :         if (start <= slot && slot < end) {
    3850             :           typed->insert(std::make_pair(type, slot));
    3851             :         }
    3852             :         return KEEP_SLOT;
    3853             :       });
    3854             : }
    3855             : 
    3856             : void Heap::VerifyRememberedSetFor(HeapObject object) {
    3857             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    3858             :   DCHECK_IMPLIES(chunk->mutex() == nullptr, ReadOnlyHeap::Contains(object));
    3859             :   // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
    3860             :   base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
    3861             :       chunk->mutex());
    3862             :   Address start = object->address();
    3863             :   Address end = start + object->Size();
    3864             :   std::set<Address> old_to_new;
    3865             :   std::set<std::pair<SlotType, Address> > typed_old_to_new;
    3866             :   if (!InYoungGeneration(object)) {
    3867             :     store_buffer()->MoveAllEntriesToRememberedSet();
    3868             :     CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
    3869             :     OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
    3870             :                                          &this->ephemeron_remembered_set_);
    3871             :     object->IterateBody(&visitor);
    3872             :   }
    3873             :   // TODO(ulan): Add old to old slot set verification once all weak objects
    3874             :   // have their own instance types and slots are recorded for all weak fields.
    3875             : }
    3876             : #endif
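// A standalone sketch of the verification pattern above (not V8's API): first
// gather every recorded old-to-new slot address into a std::set, then re-walk
// the object's fields and require that each pointer into the young generation
// has a matching recorded slot. FakeObject and IsYoungValue are illustrative
// stand-ins only.
#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

struct FakeObject {
  std::vector<uintptr_t*> fields;  // slots holding tagged pointer values
};

// Illustrative stand-in for the young-generation check; not how V8 decides.
bool IsYoungValue(uintptr_t value) { return (value & 1u) != 0; }

void VerifyRecordedSlots(const FakeObject& object,
                         const std::set<uintptr_t*>& recorded_slots) {
  for (uintptr_t* slot : object.fields) {
    if (IsYoungValue(*slot)) {
      // Every old-to-young pointer must have been recorded in the set.
      assert(recorded_slots.count(slot) == 1);
    }
  }
}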
    3877             : 
    3878             : #ifdef DEBUG
    3879             : void Heap::VerifyCountersAfterSweeping() {
    3880             :   PagedSpaces spaces(this);
    3881             :   for (PagedSpace* space = spaces.next(); space != nullptr;
    3882             :        space = spaces.next()) {
    3883             :     space->VerifyCountersAfterSweeping();
    3884             :   }
    3885             : }
    3886             : 
    3887             : void Heap::VerifyCountersBeforeConcurrentSweeping() {
    3888             :   PagedSpaces spaces(this);
    3889             :   for (PagedSpace* space = spaces.next(); space != nullptr;
    3890             :        space = spaces.next()) {
    3891             :     space->VerifyCountersBeforeConcurrentSweeping();
    3892             :   }
    3893             : }
    3894             : #endif
    3895             : 
    3896           0 : void Heap::ZapFromSpace() {
    3897           0 :   if (!new_space_->IsFromSpaceCommitted()) return;
    3898           0 :   for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
    3899           0 :     memory_allocator()->ZapBlock(page->area_start(),
    3900           0 :                                  page->HighWaterMark() - page->area_start(),
    3901           0 :                                  ZapValue());
    3902             :   }
    3903             : }
    3904             : 
    3905     1212854 : void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
    3906             : #ifdef DEBUG
    3907             :   DCHECK(IsAligned(start_address, kIntSize));
    3908             :   for (int i = 0; i < size_in_bytes / kIntSize; i++) {
    3909             :     Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
    3910             :   }
    3911             : #endif
    3912     1212854 : }
    3913             : 
    3914             : // TODO(ishell): move builtin accessors out from Heap.
    3915   149268617 : Code Heap::builtin(int index) {
    3916             :   DCHECK(Builtins::IsBuiltinId(index));
    3917   149268617 :   return Code::cast(Object(isolate()->builtins_table()[index]));
    3918             : }
    3919             : 
    3920    51285617 : Address Heap::builtin_address(int index) {
    3921             :   DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
    3922   430912244 :   return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
    3923             : }
    3924             : 
    3925      255696 : void Heap::set_builtin(int index, Code builtin) {
    3926             :   DCHECK(Builtins::IsBuiltinId(index));
    3927             :   DCHECK(Internals::HasHeapObjectTag(builtin.ptr()));
    3928             :   // The given builtin may be completely uninitialized thus we cannot check its
    3929             :   // type here.
    3930      511392 :   isolate()->builtins_table()[index] = builtin.ptr();
    3931      255696 : }
    3932             : 
    3933       95342 : void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
    3934       96632 :   IterateStrongRoots(v, mode);
    3935       96632 :   IterateWeakRoots(v, mode);
    3936       95342 : }
    3937             : 
    3938      159279 : void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
    3939      318558 :   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
    3940      318558 :                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
    3941             :                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
    3942      159279 :   v->VisitRootPointer(Root::kStringTable, nullptr,
    3943      318558 :                       FullObjectSlot(&roots_table()[RootIndex::kStringTable]));
    3944      159279 :   v->Synchronize(VisitorSynchronization::kStringTable);
    3945      159279 :   if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
    3946             :       mode != VISIT_FOR_SERIALIZATION) {
    3947             :     // Scavenge collections have special processing for this.
    3948             :     // Do not visit for serialization, since the external string table will
    3949             :     // be populated from scratch upon deserialization.
    3950        1688 :     external_string_table_.IterateAll(v);
    3951             :   }
    3952      159279 :   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
    3953      159279 : }
    3954             : 
    3955       62647 : void Heap::IterateSmiRoots(RootVisitor* v) {
    3956             :   // Acquire execution access since we are going to read stack limit values.
    3957             :   ExecutionAccess access(isolate());
    3958             :   v->VisitRootPointers(Root::kSmiRootList, nullptr,
    3959             :                        roots_table().smi_roots_begin(),
    3960      125294 :                        roots_table().smi_roots_end());
    3961       62647 :   v->Synchronize(VisitorSynchronization::kSmiRootList);
    3962       62647 : }
    3963             : 
    3964             : // We cannot avoid stale handles to left-trimmed objects, but can only make
    3965             : // sure all handles still needed are updated. Filter out a stale pointer
    3966             : // and clear the slot to allow post processing of handles (needed because
    3967             : // the sweeper might actually free the underlying page).
    3968      275539 : class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
    3969             :  public:
    3970      275539 :   explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
    3971             :     USE(heap_);
    3972             :   }
    3973             : 
    3974           0 :   void VisitRootPointer(Root root, const char* description,
    3975             :                         FullObjectSlot p) override {
    3976           0 :     FixHandle(p);
    3977           0 :   }
    3978             : 
    3979      631627 :   void VisitRootPointers(Root root, const char* description,
    3980             :                          FullObjectSlot start, FullObjectSlot end) override {
    3981    92851228 :     for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
    3982      631627 :   }
    3983             : 
    3984             :  private:
    3985    91587974 :   inline void FixHandle(FullObjectSlot p) {
    3986    91587974 :     if (!(*p)->IsHeapObject()) return;
    3987             :     HeapObject current = HeapObject::cast(*p);
    3988             :     const MapWord map_word = current->map_word();
    3989   152701123 :     if (!map_word.IsForwardingAddress() && current->IsFiller()) {
    3990             : #ifdef DEBUG
    3991             :       // We need to find a FixedArrayBase map after walking the fillers.
    3992             :       while (current->IsFiller()) {
    3993             :         Address next = current->ptr();
    3994             :         if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
    3995             :           next += kTaggedSize;
    3996             :         } else if (current->map() ==
    3997             :                    ReadOnlyRoots(heap_).two_pointer_filler_map()) {
    3998             :           next += 2 * kTaggedSize;
    3999             :         } else {
    4000             :           next += current->Size();
    4001             :         }
    4002             :         current = HeapObject::cast(Object(next));
    4003             :       }
    4004             :       DCHECK(current->IsFixedArrayBase());
    4005             : #endif  // DEBUG
    4006             :       p.store(Smi::kZero);
    4007             :     }
    4008             :   }
    4009             : 
    4010             :   Heap* heap_;
    4011             : };
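// A minimal sketch of the stale-handle filtering idea above (stand-in types,
// not V8's): walk a range of handle slots and clear any slot whose target is
// still a filler left behind by left-trimming, so that later handle
// post-processing never sees it.
#include <cstdint>

void FixStaleHandles(uintptr_t* start, uintptr_t* end,
                     bool (*is_filler)(uintptr_t)) {
  for (uintptr_t* p = start; p < end; ++p) {
    if (is_filler(*p)) {
      *p = 0;  // clear the slot; the underlying page may be freed by the sweeper
    }
  }
}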
    4012             : 
    4013      275539 : void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
    4014      551078 :   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
    4015      551078 :                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
    4016             :                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
    4017             :   v->VisitRootPointers(Root::kStrongRootList, nullptr,
    4018             :                        roots_table().strong_roots_begin(),
    4019      551078 :                        roots_table().strong_roots_end());
    4020      275539 :   v->Synchronize(VisitorSynchronization::kStrongRootList);
    4021             : 
    4022      275539 :   isolate_->bootstrapper()->Iterate(v);
    4023      275539 :   v->Synchronize(VisitorSynchronization::kBootstrapper);
    4024      275539 :   isolate_->Iterate(v);
    4025      275539 :   v->Synchronize(VisitorSynchronization::kTop);
    4026      275539 :   Relocatable::Iterate(isolate_, v);
    4027      275539 :   v->Synchronize(VisitorSynchronization::kRelocatable);
    4028      275539 :   isolate_->debug()->Iterate(v);
    4029      275539 :   v->Synchronize(VisitorSynchronization::kDebug);
    4030             : 
    4031      275539 :   isolate_->compilation_cache()->Iterate(v);
    4032      275539 :   v->Synchronize(VisitorSynchronization::kCompilationCache);
    4033             : 
    4034             :   // Iterate over local handles in handle scopes.
    4035             :   FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
    4036      275539 :   isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
    4037      275539 :   isolate_->handle_scope_implementer()->Iterate(v);
    4038      275539 :   isolate_->IterateDeferredHandles(v);
    4039      275539 :   v->Synchronize(VisitorSynchronization::kHandleScope);
    4040             : 
    4041             :   // Iterate over the builtin code objects and code stubs in the
    4042             :   // heap. Note that it is not necessary to iterate over code objects
    4043             :   // on scavenge collections.
    4044      275539 :   if (!isMinorGC) {
    4045      249441 :     IterateBuiltins(v);
    4046      249441 :     v->Synchronize(VisitorSynchronization::kBuiltins);
    4047             : 
    4048             :     // The dispatch table is set up directly from the builtins using
    4049             :     // InitializeDispatchTable so there is no need to iterate to create it.
    4050      249441 :     if (mode != VISIT_FOR_SERIALIZATION) {
    4051             :       // Currently we iterate the dispatch table to update pointers to possibly
    4052             :       // moved Code objects for bytecode handlers.
    4053             :       // TODO(v8:6666): Remove iteration once builtins are embedded (and thus
    4054             :       // immovable) in every build configuration.
    4055      186794 :       isolate_->interpreter()->IterateDispatchTable(v);
    4056      186794 :       v->Synchronize(VisitorSynchronization::kDispatchTable);
    4057             :     }
    4058             :   }
    4059             : 
    4060             :   // Iterate over global handles.
    4061      275539 :   switch (mode) {
    4062             :     case VISIT_FOR_SERIALIZATION:
    4063             :       // Global handles are not iterated by the serializer. Values referenced by
    4064             :       // global handles need to be added manually.
    4065             :       break;
    4066             :     case VISIT_ONLY_STRONG:
    4067      116658 :       isolate_->global_handles()->IterateStrongRoots(v);
    4068      116658 :       break;
    4069             :     case VISIT_ALL_IN_SCAVENGE:
    4070             :     case VISIT_ALL_IN_MINOR_MC_MARK:
    4071       26098 :       isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
    4072       26098 :       break;
    4073             :     case VISIT_ALL_IN_MINOR_MC_UPDATE:
    4074           0 :       isolate_->global_handles()->IterateAllYoungRoots(v);
    4075           0 :       break;
    4076             :     case VISIT_ALL_IN_SWEEP_NEWSPACE:
    4077             :     case VISIT_ALL:
    4078       70136 :       isolate_->global_handles()->IterateAllRoots(v);
    4079       70136 :       break;
    4080             :   }
    4081      275539 :   v->Synchronize(VisitorSynchronization::kGlobalHandles);
    4082             : 
    4083             :   // Iterate over eternal handles. Eternal handles are not iterated by the
    4084             :   // serializer. Values referenced by eternal handles need to be added manually.
    4085      275539 :   if (mode != VISIT_FOR_SERIALIZATION) {
    4086      212892 :     if (isMinorGC) {
    4087       26098 :       isolate_->eternal_handles()->IterateYoungRoots(v);
    4088             :     } else {
    4089      186794 :       isolate_->eternal_handles()->IterateAllRoots(v);
    4090             :     }
    4091             :   }
    4092      275539 :   v->Synchronize(VisitorSynchronization::kEternalHandles);
    4093             : 
    4094             :   // Iterate over pointers being held by inactive threads.
    4095      275539 :   isolate_->thread_manager()->Iterate(v);
    4096      275539 :   v->Synchronize(VisitorSynchronization::kThreadManager);
    4097             : 
    4098             :   // Iterate over other strong roots (currently only identity maps).
    4099      551423 :   for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
    4100      275884 :     v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
    4101             :   }
    4102      275539 :   v->Synchronize(VisitorSynchronization::kStrongRoots);
    4103             : 
    4104             :   // Iterate over pending Microtasks stored in MicrotaskQueues.
    4105      275539 :   MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
    4106      275539 :   if (default_microtask_queue) {
    4107             :     MicrotaskQueue* microtask_queue = default_microtask_queue;
    4108             :     do {
    4109      275541 :       microtask_queue->IterateMicrotasks(v);
    4110             :       microtask_queue = microtask_queue->next();
    4111      275541 :     } while (microtask_queue != default_microtask_queue);
    4112             :   }
    4113             : 
    4114             :   // Iterate over the partial snapshot cache unless serializing or
    4115             :   // deserializing.
    4116      275539 :   if (mode != VISIT_FOR_SERIALIZATION) {
    4117      212892 :     SerializerDeserializer::Iterate(isolate_, v);
    4118      212892 :     v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
    4119             :   }
    4120      275539 : }
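// The function above always visits the same sequence of root categories but
// switches which subset of global handles it visits based on the VisitMode.
// A simplified sketch of that mode-driven dispatch follows; the enum values
// and callbacks are illustrative stand-ins, not V8's names.
#include <functional>

enum class VisitMode { kForSerialization, kOnlyStrong, kMinorGC, kAll };

void VisitGlobalHandles(VisitMode mode,
                        const std::function<void()>& visit_strong,
                        const std::function<void()>& visit_young,
                        const std::function<void()>& visit_all) {
  switch (mode) {
    case VisitMode::kForSerialization:
      break;  // values reachable from global handles are added manually
    case VisitMode::kOnlyStrong:
      visit_strong();
      break;
    case VisitMode::kMinorGC:
      visit_young();
      break;
    case VisitMode::kAll:
      visit_all();
      break;
  }
}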
    4121             : 
    4122         398 : void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
    4123         398 :   isolate_->global_handles()->IterateWeakRoots(v);
    4124         398 : }
    4125             : 
    4126      249750 : void Heap::IterateBuiltins(RootVisitor* v) {
    4127   759502386 :   for (int i = 0; i < Builtins::builtin_count; i++) {
    4128   759252945 :     v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
    4129   759252719 :                         FullObjectSlot(builtin_address(i)));
    4130             :   }
    4131             : #ifdef V8_EMBEDDED_BUILTINS
    4132             :   // The entry table does not need to be updated if all builtins are embedded.
    4133             :   STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
    4134             : #else
    4135             :   // If builtins are not embedded, they may move and thus the entry table must
    4136             :   // be updated.
    4137             :   // TODO(v8:6666): Remove once builtins are embedded unconditionally.
    4138             :   Builtins::UpdateBuiltinEntryTable(isolate());
    4139             : #endif  // V8_EMBEDDED_BUILTINS
    4140      249441 : }
    4141             : 
    4142             : // TODO(1236194): Since the heap size is configurable on the command line
    4143             : // and through the API, we should gracefully handle the case that the heap
    4144             : // size is not big enough to fit all the initial objects.
    4145       62442 : void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
    4146             :                          size_t max_old_generation_size_in_mb,
    4147             :                          size_t code_range_size_in_mb) {
    4148             :   // Overwrite default configuration.
    4149       62442 :   if (max_semi_space_size_in_kb != 0) {
    4150             :     max_semi_space_size_ =
    4151       60234 :         RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
    4152             :   }
    4153       62442 :   if (max_old_generation_size_in_mb != 0) {
    4154       30121 :     max_old_generation_size_ = max_old_generation_size_in_mb * MB;
    4155             :   }
    4156             : 
    4157             :   // If max space size flags are specified overwrite the configuration.
    4158       62442 :   if (FLAG_max_semi_space_size > 0) {
    4159         186 :     max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
    4160             :   }
    4161       62442 :   if (FLAG_max_old_space_size > 0) {
    4162             :     max_old_generation_size_ =
    4163          39 :         static_cast<size_t>(FLAG_max_old_space_size) * MB;
    4164             :   }
    4165             : 
    4166             :   if (Page::kPageSize > MB) {
    4167             :     max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
    4168             :     max_old_generation_size_ =
    4169             :         RoundUp<Page::kPageSize>(max_old_generation_size_);
    4170             :   }
    4171             : 
    4172       62442 :   if (FLAG_stress_compaction) {
    4173             :     // This will cause more frequent GCs when stressing.
    4174          96 :     max_semi_space_size_ = MB;
    4175             :   }
    4176             : 
    4177             :   // The new space size must be a power of two to support single-bit testing
    4178             :   // for containment.
    4179       62442 :   max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
    4180      124884 :       static_cast<uint64_t>(max_semi_space_size_)));
    4181             : 
    4182       62442 :   if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
    4183             :     // Start with at least 1*MB semi-space on machines with a lot of memory.
    4184             :     initial_semispace_size_ =
    4185      124470 :         Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
    4186             :   }
    4187             : 
    4188       62442 :   if (FLAG_min_semi_space_size > 0) {
    4189             :     size_t initial_semispace_size =
    4190          35 :         static_cast<size_t>(FLAG_min_semi_space_size) * MB;
    4191          35 :     if (initial_semispace_size > max_semi_space_size_) {
    4192           5 :       initial_semispace_size_ = max_semi_space_size_;
    4193           5 :       if (FLAG_trace_gc) {
    4194           0 :         PrintIsolate(isolate_,
    4195             :                      "Min semi-space size cannot be more than the maximum "
    4196             :                      "semi-space size of %" PRIuS " MB\n",
    4197           0 :                      max_semi_space_size_ / MB);
    4198             :       }
    4199             :     } else {
    4200             :       initial_semispace_size_ =
    4201          30 :           RoundUp<Page::kPageSize>(initial_semispace_size);
    4202             :     }
    4203             :   }
    4204             : 
    4205      124884 :   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
    4206             : 
    4207       62442 :   if (FLAG_semi_space_growth_factor < 2) {
    4208           0 :     FLAG_semi_space_growth_factor = 2;
    4209             :   }
    4210             : 
    4211             :   // The old generation is paged and needs at least one page for each space.
    4212             :   int paged_space_count =
    4213             :       LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
    4214             :   initial_max_old_generation_size_ = max_old_generation_size_ =
    4215       62442 :       Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
    4216       62442 :           max_old_generation_size_);
    4217             : 
    4218       62442 :   if (FLAG_initial_old_space_size > 0) {
    4219           0 :     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
    4220             :   } else {
    4221             :     initial_old_generation_size_ =
    4222       62442 :         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
    4223             :   }
    4224       62442 :   old_generation_allocation_limit_ = initial_old_generation_size_;
    4225             : 
    4226             :   // We rely on being able to allocate new arrays in paged spaces.
    4227             :   DCHECK(kMaxRegularHeapObjectSize >=
    4228             :          (JSArray::kSize +
    4229             :           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
    4230             :           AllocationMemento::kSize));
    4231             : 
    4232       62442 :   code_range_size_ = code_range_size_in_mb * MB;
    4233             : 
    4234       62442 :   configured_ = true;
    4235       62442 : }
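// ConfigureHeap rounds the semi-space size up to page granularity and then to
// a power of two (so containment can be tested with a single bit mask), and
// clamps the initial size to the maximum. A standalone sketch of that
// arithmetic, with an illustrative page-size constant:
#include <algorithm>
#include <cstddef>

constexpr size_t kSketchPageSize = 512 * 1024;  // illustrative, not V8's value

size_t RoundUpToPowerOfTwo(size_t x) {
  size_t result = 1;
  while (result < x) result <<= 1;
  return result;
}

size_t RoundUpToPage(size_t x) {
  return ((x + kSketchPageSize - 1) / kSketchPageSize) * kSketchPageSize;
}

size_t ComputeInitialSemiSpaceSize(size_t requested_initial,
                                   size_t requested_max) {
  size_t max_size = RoundUpToPowerOfTwo(RoundUpToPage(requested_max));
  // The initial size can never exceed the (rounded) maximum.
  return std::min(RoundUpToPage(requested_initial), max_size);
}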
    4236             : 
    4237             : 
    4238       94924 : void Heap::AddToRingBuffer(const char* string) {
    4239             :   size_t first_part =
    4240       94924 :       Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
    4241       94924 :   memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
    4242       94924 :   ring_buffer_end_ += first_part;
    4243       94924 :   if (first_part < strlen(string)) {
    4244       24674 :     ring_buffer_full_ = true;
    4245       24674 :     size_t second_part = strlen(string) - first_part;
    4246       24674 :     memcpy(trace_ring_buffer_, string + first_part, second_part);
    4247       24674 :     ring_buffer_end_ = second_part;
    4248             :   }
    4249       94924 : }
    4250             : 
    4251             : 
    4252          15 : void Heap::GetFromRingBuffer(char* buffer) {
    4253             :   size_t copied = 0;
    4254          15 :   if (ring_buffer_full_) {
    4255           0 :     copied = kTraceRingBufferSize - ring_buffer_end_;
    4256           0 :     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
    4257             :   }
    4258          15 :   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
    4259          15 : }
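// The two functions above implement a fixed-size trace ring buffer: a write
// wraps around in at most two memcpy calls, and a read reassembles the
// contents oldest-first. A self-contained sketch of the same pattern (the
// size constant is illustrative, and writes are assumed to fit in the buffer,
// as in the code above):
#include <algorithm>
#include <cstring>

class TraceRingBuffer {
 public:
  static constexpr size_t kSize = 512;

  void Add(const char* s) {  // assumes strlen(s) <= kSize
    size_t len = strlen(s);
    size_t first = std::min(len, kSize - end_);
    memcpy(buffer_ + end_, s, first);
    end_ += first;
    if (first < len) {  // wrap around to the start of the buffer
      full_ = true;
      size_t second = len - first;
      memcpy(buffer_, s + first, second);
      end_ = second;
    }
  }

  void Get(char* out) const {  // out must have room for kSize bytes
    size_t copied = 0;
    if (full_) {
      copied = kSize - end_;
      memcpy(out, buffer_ + end_, copied);  // oldest part first
    }
    memcpy(out + copied, buffer_, end_);
  }

 private:
  char buffer_[kSize] = {};
  size_t end_ = 0;
  bool full_ = false;
};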
    4260             : 
    4261       32319 : void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
    4262             : 
    4263          15 : void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
    4264          15 :   *stats->start_marker = HeapStats::kStartMarker;
    4265          15 :   *stats->end_marker = HeapStats::kEndMarker;
    4266          15 :   *stats->ro_space_size = read_only_space_->Size();
    4267          30 :   *stats->ro_space_capacity = read_only_space_->Capacity();
    4268          15 :   *stats->new_space_size = new_space_->Size();
    4269          30 :   *stats->new_space_capacity = new_space_->Capacity();
    4270          15 :   *stats->old_space_size = old_space_->SizeOfObjects();
    4271          30 :   *stats->old_space_capacity = old_space_->Capacity();
    4272          15 :   *stats->code_space_size = code_space_->SizeOfObjects();
    4273          30 :   *stats->code_space_capacity = code_space_->Capacity();
    4274          15 :   *stats->map_space_size = map_space_->SizeOfObjects();
    4275          30 :   *stats->map_space_capacity = map_space_->Capacity();
    4276          15 :   *stats->lo_space_size = lo_space_->Size();
    4277          15 :   *stats->code_lo_space_size = code_lo_space_->Size();
    4278          15 :   isolate_->global_handles()->RecordStats(stats);
    4279          30 :   *stats->memory_allocator_size = memory_allocator()->Size();
    4280          15 :   *stats->memory_allocator_capacity =
    4281          15 :       memory_allocator()->Size() + memory_allocator()->Available();
    4282          15 :   *stats->os_error = base::OS::GetLastError();
    4283          30 :   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
    4284          30 :   *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
    4285          15 :   if (take_snapshot) {
    4286           0 :     HeapIterator iterator(this);
    4287           0 :     for (HeapObject obj = iterator.next(); !obj.is_null();
    4288             :          obj = iterator.next()) {
    4289             :       InstanceType type = obj->map()->instance_type();
    4290             :       DCHECK(0 <= type && type <= LAST_TYPE);
    4291           0 :       stats->objects_per_type[type]++;
    4292           0 :       stats->size_per_type[type] += obj->Size();
    4293             :     }
    4294             :   }
    4295          15 :   if (stats->last_few_messages != nullptr)
    4296          15 :     GetFromRingBuffer(stats->last_few_messages);
    4297          15 :   if (stats->js_stacktrace != nullptr) {
    4298             :     FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
    4299             :     StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
    4300          15 :     if (gc_state() == Heap::NOT_IN_GC) {
    4301          15 :       isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
    4302             :     } else {
    4303           0 :       accumulator.Add("Cannot get stack trace in GC.");
    4304             :     }
    4305             :   }
    4306          15 : }
    4307             : 
    4308     1875646 : size_t Heap::OldGenerationSizeOfObjects() {
    4309             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
    4310             :   size_t total = 0;
    4311     9378229 :   for (PagedSpace* space = spaces.next(); space != nullptr;
    4312             :        space = spaces.next()) {
    4313     7502585 :     total += space->SizeOfObjects();
    4314             :   }
    4315     1875646 :   return total + lo_space_->SizeOfObjects();
    4316             : }
    4317             : 
    4318         310 : uint64_t Heap::PromotedExternalMemorySize() {
    4319             :   IsolateData* isolate_data = isolate()->isolate_data();
    4320     1026880 :   if (isolate_data->external_memory_ <=
    4321      513440 :       isolate_data->external_memory_at_last_mark_compact_) {
    4322             :     return 0;
    4323             :   }
    4324             :   return static_cast<uint64_t>(
    4325       29303 :       isolate_data->external_memory_ -
    4326       29303 :       isolate_data->external_memory_at_last_mark_compact_);
    4327             : }
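// The function above is a clamped subtraction: external memory growth since
// the last mark-compact, never negative. A tiny standalone sketch:
#include <cstdint>

uint64_t PromotedExternalBytes(int64_t external_now,
                               int64_t external_at_last_mark_compact) {
  if (external_now <= external_at_last_mark_compact) return 0;
  return static_cast<uint64_t>(external_now - external_at_last_mark_compact);
}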
    4328             : 
    4329        3450 : bool Heap::ShouldOptimizeForLoadTime() {
    4330           0 :   return isolate()->rail_mode() == PERFORMANCE_LOAD &&
    4331        3450 :          !AllocationLimitOvershotByLargeMargin() &&
    4332             :          MonotonicallyIncreasingTimeInMs() <
    4333        3450 :              isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
    4334             : }
    4335             : 
    4336             : // This predicate is called when an old generation space cannot allocate from
    4337             : // the free list and is about to add a new page. Returning false will cause a
    4338             : // major GC. It happens when the old generation allocation limit is reached and
    4339             : // - either we need to optimize for memory usage,
    4340             : // - or the incremental marking is not in progress and we cannot start it.
    4341      535996 : bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
    4342      535996 :   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
    4343             :   // We reached the old generation allocation limit.
    4344             : 
    4345         870 :   if (ShouldOptimizeForMemoryUsage()) return false;
    4346             : 
    4347         870 :   if (ShouldOptimizeForLoadTime()) return true;
    4348             : 
    4349         870 :   if (incremental_marking()->NeedsFinalization()) {
    4350         661 :     return !AllocationLimitOvershotByLargeMargin();
    4351             :   }
    4352             : 
    4353         319 :   if (incremental_marking()->IsStopped() &&
    4354         110 :       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
    4355             :     // We cannot start incremental marking.
    4356             :     return false;
    4357             :   }
    4358         100 :   return true;
    4359             : }
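// The predicate above reduces to a short decision ladder. The sketch below
// mirrors that ladder with plain boolean inputs; the real code derives each
// input from heap state, so this is illustrative only.
bool ShouldExpandOldGeneration(bool always_allocate, bool under_limit,
                               bool optimize_for_memory, bool optimize_for_load,
                               bool marking_needs_finalization,
                               bool overshot_by_large_margin,
                               bool can_start_incremental_marking) {
  if (always_allocate || under_limit) return true;   // limit not reached yet
  if (optimize_for_memory) return false;             // force a major GC
  if (optimize_for_load) return true;
  if (marking_needs_finalization) return !overshot_by_large_margin;
  if (!can_start_incremental_marking) return false;  // force a major GC
  return true;
}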
    4360             : 
    4361       78218 : Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
    4362       78218 :   if (ShouldReduceMemory() || FLAG_stress_compaction) {
    4363             :     return Heap::HeapGrowingMode::kMinimal;
    4364             :   }
    4365             : 
    4366       70904 :   if (ShouldOptimizeForMemoryUsage()) {
    4367             :     return Heap::HeapGrowingMode::kConservative;
    4368             :   }
    4369             : 
    4370       70857 :   if (memory_reducer()->ShouldGrowHeapSlowly()) {
    4371             :     return Heap::HeapGrowingMode::kSlow;
    4372             :   }
    4373             : 
    4374       70838 :   return Heap::HeapGrowingMode::kDefault;
    4375             : }
    4376             : 
    4377             : // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
    4378             : // The kNoLimit means that either incremental marking is disabled or it is too
    4379             : // early to start incremental marking.
    4380             : // The kSoftLimit means that incremental marking should be started soon.
    4381             : // The kHardLimit means that incremental marking should be started immediately.
    4382     1376068 : Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
    4383             :   // Code using an AlwaysAllocateScope assumes that the GC state does not
    4384             :   // change; that implies that marking steps must not be performed.
    4385     2230078 :   if (!incremental_marking()->CanBeActivated() || always_allocate()) {
    4386             :     // Incremental marking is disabled or it is too early to start.
    4387             :     return IncrementalMarkingLimit::kNoLimit;
    4388             :   }
    4389      852665 :   if (FLAG_stress_incremental_marking) {
    4390             :     return IncrementalMarkingLimit::kHardLimit;
    4391             :   }
    4392      824026 :   if (OldGenerationSizeOfObjects() <=
    4393             :       IncrementalMarking::kActivationThreshold) {
    4394             :     // The old generation is still below the activation threshold.
    4395             :     return IncrementalMarkingLimit::kNoLimit;
    4396             :   }
    4397       45223 :   if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
    4398             :       HighMemoryPressure()) {
    4399             :     // If there is high memory pressure or stress testing is enabled, then
    4400             :     // start marking immediately.
    4401             :     return IncrementalMarkingLimit::kHardLimit;
    4402             :   }
    4403             : 
    4404       22609 :   if (FLAG_stress_marking > 0) {
    4405             :     double gained_since_last_gc =
    4406           0 :         PromotedSinceLastGC() +
    4407           0 :         (isolate()->isolate_data()->external_memory_ -
    4408           0 :          isolate()->isolate_data()->external_memory_at_last_mark_compact_);
    4409             :     double size_before_gc =
    4410           0 :         OldGenerationObjectsAndPromotedExternalMemorySize() -
    4411           0 :         gained_since_last_gc;
    4412           0 :     double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
    4413           0 :     if (bytes_to_limit > 0) {
    4414           0 :       double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
    4415             : 
    4416           0 :       if (FLAG_trace_stress_marking) {
    4417             :         isolate()->PrintWithTimestamp(
    4418             :             "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
    4419           0 :             current_percent);
    4420             :       }
    4421             : 
    4422           0 :       if (FLAG_fuzzer_gc_analysis) {
    4423             :         // Skips values >=100% since they already trigger marking.
    4424           0 :         if (current_percent < 100.0) {
    4425             :           max_marking_limit_reached_ =
    4426           0 :               std::max(max_marking_limit_reached_, current_percent);
    4427             :         }
    4428           0 :       } else if (static_cast<int>(current_percent) >=
    4429           0 :                  stress_marking_percentage_) {
    4430           0 :         stress_marking_percentage_ = NextStressMarkingLimit();
    4431           0 :         return IncrementalMarkingLimit::kHardLimit;
    4432             :       }
    4433             :     }
    4434             :   }
    4435             : 
    4436       22609 :   size_t old_generation_space_available = OldGenerationSpaceAvailable();
    4437             : 
    4438       45218 :   if (old_generation_space_available > new_space_->Capacity()) {
    4439             :     return IncrementalMarkingLimit::kNoLimit;
    4440             :   }
    4441        2584 :   if (ShouldOptimizeForMemoryUsage()) {
    4442             :     return IncrementalMarkingLimit::kHardLimit;
    4443             :   }
    4444        2580 :   if (ShouldOptimizeForLoadTime()) {
    4445             :     return IncrementalMarkingLimit::kNoLimit;
    4446             :   }
    4447        2580 :   if (old_generation_space_available == 0) {
    4448             :     return IncrementalMarkingLimit::kHardLimit;
    4449             :   }
    4450        2468 :   return IncrementalMarkingLimit::kSoftLimit;
    4451             : }
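// Leaving aside the stress-marking and fuzzing paths, the function above
// classifies the limit from the available old-generation space relative to
// new-space capacity. A simplified sketch of that final classification
// (names are illustrative):
#include <cstddef>

enum class MarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

MarkingLimit ClassifyMarkingLimit(size_t old_gen_space_available,
                                  size_t new_space_capacity,
                                  bool optimize_for_memory,
                                  bool optimize_for_load) {
  if (old_gen_space_available > new_space_capacity) {
    return MarkingLimit::kNoLimit;  // plenty of headroom, start later
  }
  if (optimize_for_memory) return MarkingLimit::kHardLimit;
  if (optimize_for_load) return MarkingLimit::kNoLimit;
  if (old_gen_space_available == 0) return MarkingLimit::kHardLimit;
  return MarkingLimit::kSoftLimit;  // start soon, but not immediately
}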
    4452             : 
    4453          10 : void Heap::EnableInlineAllocation() {
    4454        8198 :   if (!inline_allocation_disabled_) return;
    4455        8198 :   inline_allocation_disabled_ = false;
    4456             : 
    4457             :   // Update inline allocation limit for new space.
    4458        8198 :   new_space()->UpdateInlineAllocationLimit(0);
    4459             : }
    4460             : 
    4461             : 
    4462        8222 : void Heap::DisableInlineAllocation() {
    4463        8222 :   if (inline_allocation_disabled_) return;
    4464        8222 :   inline_allocation_disabled_ = true;
    4465             : 
    4466             :   // Update inline allocation limit for new space.
    4467        8222 :   new_space()->UpdateInlineAllocationLimit(0);
    4468             : 
    4469             :   // Update inline allocation limit for old spaces.
    4470             :   PagedSpaces spaces(this);
    4471       16444 :   CodeSpaceMemoryModificationScope modification_scope(this);
    4472       32888 :   for (PagedSpace* space = spaces.next(); space != nullptr;
    4473             :        space = spaces.next()) {
    4474       24666 :     space->FreeLinearAllocationArea();
    4475             :   }
    4476             : }
    4477             : 
    4478       44794 : HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
    4479             :   // Code objects that should stay at a fixed address are allocated either in
    4480             :   // the first page of code space or in large object space; during snapshot
    4481             :   // creation, the containing page is instead marked as immovable.
    4482             :   DCHECK(!heap_object.is_null());
    4483             :   DCHECK(code_space_->Contains(heap_object));
    4484             :   DCHECK_GE(object_size, 0);
    4485       44794 :   if (!Heap::IsImmovable(heap_object)) {
    4486       80194 :     if (isolate()->serializer_enabled() ||
    4487       40094 :         code_space_->first_page()->Contains(heap_object->address())) {
    4488             :       MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
    4489             :     } else {
    4490             :       // Discard the first code allocation, which was on a page where it could
    4491             :       // be moved.
    4492             :       CreateFillerObjectAt(heap_object->address(), object_size,
    4493       40094 :                            ClearRecordedSlots::kNo);
    4494       40094 :       heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
    4495             :       UnprotectAndRegisterMemoryChunk(heap_object);
    4496             :       ZapCodeObject(heap_object->address(), object_size);
    4497       40094 :       OnAllocationEvent(heap_object, object_size);
    4498             :     }
    4499             :   }
    4500       44794 :   return heap_object;
    4501             : }
    4502             : 
    4503   326600437 : HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
    4504             :                                            AllocationAlignment alignment) {
    4505             :   HeapObject result;
    4506   326600437 :   AllocationResult alloc = AllocateRaw(size, allocation, alignment);
    4507   326599795 :   if (alloc.To(&result)) {
    4508             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4509   326576937 :     return result;
    4510             :   }
    4511             :   // Two GCs before panicking. In new space the retry almost always succeeds.
    4512       22960 :   for (int i = 0; i < 2; i++) {
    4513             :     CollectGarbage(alloc.RetrySpace(),
    4514       22904 :                    GarbageCollectionReason::kAllocationFailure);
    4515       22904 :     alloc = AllocateRaw(size, allocation, alignment);
    4516       22904 :     if (alloc.To(&result)) {
    4517             :       DCHECK(result != ReadOnlyRoots(this).exception());
    4518       22853 :       return result;
    4519             :     }
    4520             :   }
    4521           5 :   return HeapObject();
    4522             : }
    4523             : 
    4524   325010816 : HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
    4525             :                                             AllocationAlignment alignment) {
    4526             :   AllocationResult alloc;
    4527   325010816 :   HeapObject result = AllocateRawWithLightRetry(size, allocation, alignment);
    4528   325010198 :   if (!result.is_null()) return result;
    4529             : 
    4530           5 :   isolate()->counters()->gc_last_resort_from_handles()->Increment();
    4531           5 :   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
    4532             :   {
    4533             :     AlwaysAllocateScope scope(isolate());
    4534           5 :     alloc = AllocateRaw(size, allocation, alignment);
    4535             :   }
    4536           5 :   if (alloc.To(&result)) {
    4537             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4538           5 :     return result;
    4539             :   }
    4540             :   // TODO(1181417): Fix this.
    4541           0 :   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
    4542             :   return HeapObject();
    4543             : }
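// Both allocation helpers above follow the same shape: try once, run up to
// two garbage collections with a retry after each, then collect everything as
// a last resort before giving up. A standalone sketch of that retry pattern;
// the Allocator interface is a stand-in, not V8's API.
#include <cstddef>
#include <cstdlib>

struct Allocator {
  virtual ~Allocator() = default;
  virtual void* TryAllocate(size_t size) = 0;         // nullptr on failure
  virtual void CollectGarbage() = 0;                  // one regular collection
  virtual void CollectAllAvailableGarbage() = 0;      // last-resort collection
};

void* AllocateWithRetryOrFail(Allocator* heap, size_t size) {
  if (void* result = heap->TryAllocate(size)) return result;
  // Two GCs before panicking; the retry almost always succeeds after them.
  for (int i = 0; i < 2; i++) {
    heap->CollectGarbage();
    if (void* result = heap->TryAllocate(size)) return result;
  }
  // Last resort: collect everything, retry once more, then give up.
  heap->CollectAllAvailableGarbage();
  if (void* result = heap->TryAllocate(size)) return result;
  abort();  // genuinely out of memory
}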
    4544             : 
    4545             : // TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
    4546             : // parameter and just do what's necessary.
    4547       40094 : HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
    4548       40094 :   AllocationResult alloc = code_lo_space()->AllocateRaw(size);
    4549             :   HeapObject result;
    4550       40094 :   if (alloc.To(&result)) {
    4551             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4552       40087 :     return result;
    4553             :   }
    4554             :   // Two GCs before panicking.
    4555           7 :   for (int i = 0; i < 2; i++) {
    4556             :     CollectGarbage(alloc.RetrySpace(),
    4557           7 :                    GarbageCollectionReason::kAllocationFailure);
    4558           7 :     alloc = code_lo_space()->AllocateRaw(size);
    4559           7 :     if (alloc.To(&result)) {
    4560             :       DCHECK(result != ReadOnlyRoots(this).exception());
    4561           7 :       return result;
    4562             :     }
    4563             :   }
    4564           0 :   isolate()->counters()->gc_last_resort_from_handles()->Increment();
    4565           0 :   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
    4566             :   {
    4567             :     AlwaysAllocateScope scope(isolate());
    4568           0 :     alloc = code_lo_space()->AllocateRaw(size);
    4569             :   }
    4570           0 :   if (alloc.To(&result)) {
    4571             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4572           0 :     return result;
    4573             :   }
    4574             :   // TODO(1181417): Fix this.
    4575           0 :   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
    4576             :   return HeapObject();
    4577             : }
    4578             : 
    4579       62442 : void Heap::SetUp() {
    4580             : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
    4581             :   allocation_timeout_ = NextAllocationTimeout();
    4582             : #endif
    4583             : 
    4584             :   // Initialize heap spaces and initial maps and objects.
    4585             :   //
    4586             :   // If the heap is not yet configured (e.g. through the API), configure it.
    4587             :   // Configuration is based on the flags new-space-size (really the semispace
    4588             :   // size) and old-space-size if set or the initial values of semispace_size_
    4589             :   // and old_generation_size_ otherwise.
    4590       62442 :   if (!configured_) ConfigureHeapDefault();
    4591             : 
    4592             :   mmap_region_base_ =
    4593       62442 :       reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
    4594       62442 :       ~kMmapRegionMask;
    4595             : 
    4596             :   // Set up memory allocator.
    4597      124884 :   memory_allocator_.reset(
    4598      124884 :       new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
    4599             : 
    4600       62442 :   store_buffer_.reset(new StoreBuffer(this));
    4601             : 
    4602       62442 :   heap_controller_.reset(new HeapController(this));
    4603             : 
    4604       62442 :   mark_compact_collector_.reset(new MarkCompactCollector(this));
    4605             : 
    4606       62442 :   scavenger_collector_.reset(new ScavengerCollector(this));
    4607             : 
    4608      124883 :   incremental_marking_.reset(
    4609             :       new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
    4610      124882 :                              mark_compact_collector_->weak_objects()));
    4611             : 
    4612       62441 :   if (FLAG_concurrent_marking || FLAG_parallel_marking) {
    4613             :     MarkCompactCollector::MarkingWorklist* marking_worklist =
    4614             :         mark_compact_collector_->marking_worklist();
    4615      124663 :     concurrent_marking_.reset(new ConcurrentMarking(
    4616             :         this, marking_worklist->shared(), marking_worklist->on_hold(),
    4617      124664 :         mark_compact_collector_->weak_objects(), marking_worklist->embedder()));
    4618             :   } else {
    4619         220 :     concurrent_marking_.reset(
    4620         220 :         new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));
    4621             :   }
    4622             : 
    4623     1061498 :   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    4624      499528 :     space_[i] = nullptr;
    4625             :   }
    4626       62442 : }
    4627             : 
    4628       62441 : void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
    4629             :   DCHECK_NOT_NULL(ro_heap);
    4630             :   DCHECK_IMPLIES(read_only_space_ != nullptr,
    4631             :                  read_only_space_ == ro_heap->read_only_space());
    4632       62441 :   read_only_heap_ = ro_heap;
    4633       62441 :   space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
    4634       62441 : }
    4635             : 
    4636       62442 : void Heap::SetUpSpaces() {
    4637             :   // Ensure SetUpFromReadOnlyHeap has been run.
    4638             :   DCHECK_NOT_NULL(read_only_space_);
    4639             :   space_[NEW_SPACE] = new_space_ =
    4640             :       new NewSpace(this, memory_allocator_->data_page_allocator(),
    4641       62442 :                    initial_semispace_size_, max_semi_space_size_);
    4642       62442 :   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
    4643       62442 :   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
    4644       62442 :   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
    4645       62442 :   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
    4646             :   space_[NEW_LO_SPACE] = new_lo_space_ =
    4647      124884 :       new NewLargeObjectSpace(this, new_space_->Capacity());
    4648       62442 :   space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
    4649             : 
    4650     9553626 :   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
    4651             :        i++) {
    4652     4745592 :     deferred_counters_[i] = 0;
    4653             :   }
    4654             : 
    4655       62442 :   tracer_.reset(new GCTracer(this));
    4656             : #ifdef ENABLE_MINOR_MC
    4657       62442 :   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
    4658             : #else
    4659             :   minor_mark_compact_collector_ = nullptr;
    4660             : #endif  // ENABLE_MINOR_MC
    4661      124884 :   array_buffer_collector_.reset(new ArrayBufferCollector(this));
    4662       62442 :   gc_idle_time_handler_.reset(new GCIdleTimeHandler());
    4663       62442 :   memory_reducer_.reset(new MemoryReducer(this));
    4664       62442 :   if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
    4665           0 :     live_object_stats_.reset(new ObjectStats(this));
    4666           0 :     dead_object_stats_.reset(new ObjectStats(this));
    4667             :   }
    4668      124884 :   local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
    4669             : 
    4670       62442 :   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
    4671       62442 :   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
    4672             : 
    4673       62442 :   store_buffer()->SetUp();
    4674             : 
    4675       62442 :   mark_compact_collector()->SetUp();
    4676             : #ifdef ENABLE_MINOR_MC
    4677       62442 :   if (minor_mark_compact_collector() != nullptr) {
    4678       62442 :     minor_mark_compact_collector()->SetUp();
    4679             :   }
    4680             : #endif  // ENABLE_MINOR_MC
    4681             : 
    4682       62442 :   if (FLAG_idle_time_scavenge) {
    4683       62442 :     scavenge_job_.reset(new ScavengeJob());
    4684       62442 :     idle_scavenge_observer_.reset(new IdleScavengeObserver(
    4685             :         *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
    4686      124884 :     new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
    4687             :   }
    4688             : 
    4689             :   SetGetExternallyAllocatedMemoryInBytesCallback(
    4690             :       DefaultGetExternallyAllocatedMemoryInBytesCallback);
    4691             : 
    4692       62442 :   if (FLAG_stress_marking > 0) {
    4693           0 :     stress_marking_percentage_ = NextStressMarkingLimit();
    4694           0 :     stress_marking_observer_ = new StressMarkingObserver(*this);
    4695             :     AddAllocationObserversToAllSpaces(stress_marking_observer_,
    4696           0 :                                       stress_marking_observer_);
    4697             :   }
    4698       62442 :   if (FLAG_stress_scavenge > 0) {
    4699           0 :     stress_scavenge_observer_ = new StressScavengeObserver(*this);
    4700           0 :     new_space()->AddAllocationObserver(stress_scavenge_observer_);
    4701             :   }
    4702             : 
    4703       62442 :   write_protect_code_memory_ = FLAG_write_protect_code_memory;
    4704       62442 : }
    4705             : 
    4706       62382 : void Heap::InitializeHashSeed() {
    4707             :   DCHECK(!deserialization_complete_);
    4708             :   uint64_t new_hash_seed;
    4709       62382 :   if (FLAG_hash_seed == 0) {
    4710       62312 :     int64_t rnd = isolate()->random_number_generator()->NextInt64();
    4711       62312 :     new_hash_seed = static_cast<uint64_t>(rnd);
    4712             :   } else {
    4713          70 :     new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
    4714             :   }
    4715             :   ReadOnlyRoots(this).hash_seed()->copy_in(
    4716             :       0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
    4717       62382 : }
    4718             : 
    4719    17198540 : void Heap::SetStackLimits() {
    4720             :   DCHECK_NOT_NULL(isolate_);
    4721             :   DCHECK(isolate_ == isolate());
    4722             :   // On 64 bit machines, pointers are generally out of range of Smis.  We write
    4723             :   // something that looks like an out of range Smi to the GC.
    4724             : 
    4725             :   // Set up the special root array entries containing the stack limits.
    4726             :   // These are actually addresses, but the tag makes the GC ignore it.
    4727             :   roots_table()[RootIndex::kStackLimit] =
    4728    34397080 :       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
    4729             :   roots_table()[RootIndex::kRealStackLimit] =
    4730    17198540 :       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
    4731    17198540 : }
    4732             : 
    4733         261 : void Heap::ClearStackLimits() {
    4734         261 :   roots_table()[RootIndex::kStackLimit] = kNullAddress;
    4735         261 :   roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
    4736         261 : }
    4737             : 
    4738           0 : int Heap::NextAllocationTimeout(int current_timeout) {
    4739           0 :   if (FLAG_random_gc_interval > 0) {
    4740             :     // If current timeout hasn't reached 0 the GC was caused by something
    4741             :     // If the current timeout hasn't reached 0, the GC was caused by something
    4742             :     // other than the --stress-atomic-gc flag and we don't update the timeout.
    4743           0 :       return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
    4744             :     } else {
    4745             :       return current_timeout;
    4746             :     }
    4747             :   }
    4748           0 :   return FLAG_gc_interval;
    4749             : }
    4750             : 
    4751           0 : void Heap::PrintAllocationsHash() {
    4752           0 :   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
    4753           0 :   PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
    4754           0 : }
    4755             : 
    4756           0 : void Heap::PrintMaxMarkingLimitReached() {
    4757           0 :   PrintF("\n### Maximum marking limit reached = %.02lf\n",
    4758           0 :          max_marking_limit_reached_);
    4759           0 : }
    4760             : 
    4761           0 : void Heap::PrintMaxNewSpaceSizeReached() {
    4762           0 :   PrintF("\n### Maximum new space size reached = %.02lf\n",
    4763           0 :          stress_scavenge_observer_->MaxNewSpaceSizeReached());
    4764           0 : }
    4765             : 
    4766           0 : int Heap::NextStressMarkingLimit() {
    4767           0 :   return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
    4768             : }
    4769             : 
    4770       62442 : void Heap::NotifyDeserializationComplete() {
    4771             :   PagedSpaces spaces(this);
    4772      249768 :   for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
    4773      187326 :     if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
    4774             : #ifdef DEBUG
    4775             :     // All pages right after bootstrapping must be marked as never-evacuate.
    4776             :     for (Page* p : *s) {
    4777             :       DCHECK(p->NeverEvacuate());
    4778             :     }
    4779             : #endif  // DEBUG
    4780             :   }
    4781             : 
    4782       62442 :   deserialization_complete_ = true;
    4783       62442 : }
    4784             : 
    4785       91732 : void Heap::NotifyBootstrapComplete() {
    4786             :   // This function is invoked for each native context creation. We are
    4787             :   // interested only in the first native context.
    4788       91732 :   if (old_generation_capacity_after_bootstrap_ == 0) {
    4789       59712 :     old_generation_capacity_after_bootstrap_ = OldGenerationCapacity();
    4790             :   }
    4791       91732 : }
    4792             : 
    4793      523723 : void Heap::NotifyOldGenerationExpansion() {
    4794             :   const size_t kMemoryReducerActivationThreshold = 1 * MB;
    4795     1294005 :   if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
    4796      107090 :       OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
    4797      551461 :                                      kMemoryReducerActivationThreshold &&
    4798             :       FLAG_memory_reducer_for_small_heaps) {
    4799             :     MemoryReducer::Event event;
    4800       27738 :     event.type = MemoryReducer::kPossibleGarbage;
    4801       27738 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    4802       27738 :     memory_reducer()->NotifyPossibleGarbage(event);
    4803             :   }
    4804      523724 : }
    4805             : 
    4806         150 : void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
    4807             :   DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
    4808         150 :   local_embedder_heap_tracer()->SetRemoteTracer(tracer);
    4809         150 : }
    4810             : 
    4811           0 : EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
    4812           0 :   return local_embedder_heap_tracer()->remote_tracer();
    4813             : }
    4814             : 
    4815           5 : void Heap::RegisterExternallyReferencedObject(Address* location) {
    4816             :   // The embedder is not aware of whether numbers are materialized as heap
    4817             :   // objects or just passed around as Smis.
    4818           5 :   Object object(*location);
    4819           5 :   if (!object->IsHeapObject()) return;
    4820             :   HeapObject heap_object = HeapObject::cast(object);
    4821             :   DCHECK(Contains(heap_object));
    4822          10 :   if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
    4823           0 :     incremental_marking()->WhiteToGreyAndPush(heap_object);
    4824             :   } else {
    4825             :     DCHECK(mark_compact_collector()->in_use());
    4826             :     mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
    4827             :   }
    4828             : }
    4829             : 
    4830      124854 : void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
    4831             : 
    4832       62426 : void Heap::TearDown() {
    4833             :   DCHECK_EQ(gc_state_, TEAR_DOWN);
    4834             : #ifdef VERIFY_HEAP
    4835             :   if (FLAG_verify_heap) {
    4836             :     Verify();
    4837             :   }
    4838             : #endif
    4839             : 
    4840             :   UpdateMaximumCommitted();
    4841             : 
    4842       62426 :   if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
    4843           0 :     PrintAllocationsHash();
    4844             :   }
    4845             : 
    4846       62427 :   if (FLAG_fuzzer_gc_analysis) {
    4847           0 :     if (FLAG_stress_marking > 0) {
    4848             :       PrintMaxMarkingLimitReached();
    4849             :     }
    4850           0 :     if (FLAG_stress_scavenge > 0) {
    4851           0 :       PrintMaxNewSpaceSizeReached();
    4852             :     }
    4853             :   }
    4854             : 
    4855       62427 :   if (FLAG_idle_time_scavenge) {
    4856      124852 :     new_space()->RemoveAllocationObserver(idle_scavenge_observer_.get());
    4857             :     idle_scavenge_observer_.reset();
    4858             :     scavenge_job_.reset();
    4859             :   }
    4860             : 
    4861       62428 :   if (FLAG_stress_marking > 0) {
    4862           0 :     RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
    4863           0 :                                            stress_marking_observer_);
    4864           0 :     delete stress_marking_observer_;
    4865           0 :     stress_marking_observer_ = nullptr;
    4866             :   }
    4867       62428 :   if (FLAG_stress_scavenge > 0) {
    4868           0 :     new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
    4869           0 :     delete stress_scavenge_observer_;
    4870           0 :     stress_scavenge_observer_ = nullptr;
    4871             :   }
    4872             : 
    4873             :   heap_controller_.reset();
    4874             : 
    4875       62428 :   if (mark_compact_collector_) {
    4876       62427 :     mark_compact_collector_->TearDown();
    4877             :     mark_compact_collector_.reset();
    4878             :   }
    4879             : 
    4880             : #ifdef ENABLE_MINOR_MC
    4881       62426 :   if (minor_mark_compact_collector_ != nullptr) {
    4882       62425 :     minor_mark_compact_collector_->TearDown();
    4883       62425 :     delete minor_mark_compact_collector_;
    4884       62426 :     minor_mark_compact_collector_ = nullptr;
    4885             :   }
    4886             : #endif  // ENABLE_MINOR_MC
    4887             : 
    4888             :   scavenger_collector_.reset();
    4889       62427 :   array_buffer_collector_.reset();
    4890       62427 :   incremental_marking_.reset();
    4891       62427 :   concurrent_marking_.reset();
    4892             : 
    4893             :   gc_idle_time_handler_.reset();
    4894             : 
    4895       62427 :   if (memory_reducer_ != nullptr) {
    4896       62427 :     memory_reducer_->TearDown();
    4897             :     memory_reducer_.reset();
    4898             :   }
    4899             : 
    4900             :   live_object_stats_.reset();
    4901             :   dead_object_stats_.reset();
    4902             : 
    4903       62427 :   local_embedder_heap_tracer_.reset();
    4904             : 
    4905       62426 :   external_string_table_.TearDown();
    4906             : 
    4907             :   // Tear down all ArrayBuffers before tearing down the heap since their
    4908             :   // byte_length may be a HeapNumber which is required for freeing the backing
    4909             :   // store.
    4910       62427 :   ArrayBufferTracker::TearDown(this);
    4911             : 
    4912       62427 :   tracer_.reset();
    4913             : 
    4914       62427 :   read_only_heap_->OnHeapTearDown();
    4915       62426 :   space_[RO_SPACE] = read_only_space_ = nullptr;
    4916      936404 :   for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
    4917      436989 :     delete space_[i];
    4918      436989 :     space_[i] = nullptr;
    4919             :   }
    4920             : 
    4921       62426 :   store_buffer()->TearDown();
    4922             : 
    4923       62427 :   memory_allocator()->TearDown();
    4924             : 
    4925             :   StrongRootsList* next = nullptr;
    4926       62427 :   for (StrongRootsList* list = strong_roots_list_; list; list = next) {
    4927           0 :     next = list->next;
    4928           0 :     delete list;
    4929             :   }
    4930       62427 :   strong_roots_list_ = nullptr;
    4931             : 
    4932             :   store_buffer_.reset();
    4933       62427 :   memory_allocator_.reset();
    4934       62427 : }
    4935             : 
    4936          35 : void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
    4937             :                                  GCType gc_type, void* data) {
    4938             :   DCHECK_NOT_NULL(callback);
    4939             :   DCHECK(gc_prologue_callbacks_.end() ==
    4940             :          std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
    4941             :                    GCCallbackTuple(callback, gc_type, data)));
    4942          35 :   gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
    4943          35 : }
    4944             : 
    4945          35 : void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
    4946             :                                     void* data) {
    4947             :   DCHECK_NOT_NULL(callback);
    4948          35 :   for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
    4949          70 :     if (gc_prologue_callbacks_[i].callback == callback &&
    4950          35 :         gc_prologue_callbacks_[i].data == data) {
    4951             :       gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
    4952             :       gc_prologue_callbacks_.pop_back();
    4953          35 :       return;
    4954             :     }
    4955             :   }
    4956           0 :   UNREACHABLE();
    4957             : }
    4958             : 
    4959       71018 : void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
    4960             :                                  GCType gc_type, void* data) {
    4961             :   DCHECK_NOT_NULL(callback);
    4962             :   DCHECK(gc_epilogue_callbacks_.end() ==
    4963             :          std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
    4964             :                    GCCallbackTuple(callback, gc_type, data)));
    4965       71018 :   gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
    4966       71018 : }
    4967             : 
    4968        8576 : void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
    4969             :                                     void* data) {
    4970             :   DCHECK_NOT_NULL(callback);
    4971       25728 :   for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
    4972       25728 :     if (gc_epilogue_callbacks_[i].callback == callback &&
    4973        8576 :         gc_epilogue_callbacks_[i].data == data) {
    4974             :       gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
    4975             :       gc_epilogue_callbacks_.pop_back();
    4976        8576 :       return;
    4977             :     }
    4978             :   }
    4979           0 :   UNREACHABLE();
    4980             : }
    4981             : 
    4982             : namespace {
    4983         392 : Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
    4984             :                                            Handle<WeakArrayList> array,
    4985             :                                            AllocationType allocation) {
    4986         392 :   if (array->length() == 0) {
    4987           0 :     return array;
    4988             :   }
    4989         392 :   int new_length = array->CountLiveWeakReferences();
    4990         392 :   if (new_length == array->length()) {
    4991         282 :     return array;
    4992             :   }
    4993             : 
    4994             :   Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
    4995             :       heap->isolate(),
    4996             :       handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
    4997         110 :       new_length, allocation);
    4998             :   // Allocation might have caused GC and turned some of the elements into
    4999             :   // cleared weak heap objects. Count the number of live references again and
    5000             :   // fill in the new array.
    5001             :   int copy_to = 0;
    5002       19780 :   for (int i = 0; i < array->length(); i++) {
    5003             :     MaybeObject element = array->Get(i);
    5004        9835 :     if (element->IsCleared()) continue;
    5005       19020 :     new_array->Set(copy_to++, element);
    5006             :   }
    5007             :   new_array->set_length(copy_to);
    5008         110 :   return new_array;
    5009             : }
    5010             : 
    5011             : }  // anonymous namespace
    5012             : 
    5013         196 : void Heap::CompactWeakArrayLists(AllocationType allocation) {
    5014             :   // Find known PrototypeUsers and compact them.
    5015             :   std::vector<Handle<PrototypeInfo>> prototype_infos;
    5016             :   {
    5017         392 :     HeapIterator iterator(this);
    5018     1615041 :     for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
    5019     1614845 :       if (o->IsPrototypeInfo()) {
    5020             :         PrototypeInfo prototype_info = PrototypeInfo::cast(o);
    5021       13187 :         if (prototype_info->prototype_users()->IsWeakArrayList()) {
    5022          25 :           prototype_infos.emplace_back(handle(prototype_info, isolate()));
    5023             :         }
    5024             :       }
    5025             :     }
    5026             :   }
    5027         221 :   for (auto& prototype_info : prototype_infos) {
    5028             :     Handle<WeakArrayList> array(
    5029             :         WeakArrayList::cast(prototype_info->prototype_users()), isolate());
    5030             :     DCHECK_IMPLIES(allocation == AllocationType::kOld,
    5031             :                    InOldSpace(*array) ||
    5032             :                        *array == ReadOnlyRoots(this).empty_weak_array_list());
    5033             :     WeakArrayList new_array = PrototypeUsers::Compact(
    5034          25 :         array, this, JSObject::PrototypeRegistryCompactionCallback, allocation);
    5035          25 :     prototype_info->set_prototype_users(new_array);
    5036             :   }
    5037             : 
    5038             :   // Find known WeakArrayLists and compact them.
    5039             :   Handle<WeakArrayList> scripts(script_list(), isolate());
    5040             :   DCHECK_IMPLIES(allocation == AllocationType::kOld, InOldSpace(*scripts));
    5041         196 :   scripts = CompactWeakArrayList(this, scripts, allocation);
    5042             :   set_script_list(*scripts);
    5043             : 
    5044             :   Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
    5045             :                                        isolate());
    5046             :   DCHECK_IMPLIES(allocation == AllocationType::kOld,
    5047             :                  InOldSpace(*no_script_list));
    5048         196 :   no_script_list = CompactWeakArrayList(this, no_script_list, allocation);
    5049             :   set_noscript_shared_function_infos(*no_script_list);
    5050         196 : }
    5051             : 
    5052      132925 : void Heap::AddRetainedMap(Handle<Map> map) {
    5053      132925 :   if (map->is_in_retained_map_list()) {
    5054             :     return;
    5055             :   }
    5056             :   Handle<WeakArrayList> array(retained_maps(), isolate());
    5057       46713 :   if (array->IsFull()) {
    5058       12145 :     CompactRetainedMaps(*array);
    5059             :   }
    5060             :   array =
    5061       46713 :       WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
    5062             :   array = WeakArrayList::AddToEnd(
    5063             :       isolate(), array,
    5064       93426 :       MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
    5065       46713 :   if (*array != retained_maps()) {
    5066             :     set_retained_maps(*array);
    5067             :   }
    5068             :   map->set_is_in_retained_map_list(true);
    5069             : }
    5070             : 
    5071       12145 : void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
    5072             :   DCHECK_EQ(retained_maps, this->retained_maps());
    5073             :   int length = retained_maps->length();
    5074             :   int new_length = 0;
    5075             :   int new_number_of_disposed_maps = 0;
    5076             :   // This loop compacts the array by removing cleared weak cells.
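                     :   // Entries are stored as (weak map, age Smi) pairs, hence the stride of 2.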
    5077       89925 :   for (int i = 0; i < length; i += 2) {
    5078             :     MaybeObject maybe_object = retained_maps->Get(i);
    5079       38890 :     if (maybe_object->IsCleared()) {
    5080        8850 :       continue;
    5081             :     }
    5082             : 
    5083             :     DCHECK(maybe_object->IsWeak());
    5084             : 
    5085       30040 :     MaybeObject age = retained_maps->Get(i + 1);
    5086             :     DCHECK(age->IsSmi());
    5087       30040 :     if (i != new_length) {
    5088        3266 :       retained_maps->Set(new_length, maybe_object);
    5089        3266 :       retained_maps->Set(new_length + 1, age);
    5090             :     }
    5091       30040 :     if (i < number_of_disposed_maps_) {
    5092          74 :       new_number_of_disposed_maps += 2;
    5093             :     }
    5094       30040 :     new_length += 2;
    5095             :   }
    5096       12145 :   number_of_disposed_maps_ = new_number_of_disposed_maps;
    5097             :   HeapObject undefined = ReadOnlyRoots(this).undefined_value();
    5098       47545 :   for (int i = new_length; i < length; i++) {
    5099       17700 :     retained_maps->Set(i, HeapObjectReference::Strong(undefined));
    5100             :   }
    5101       12145 :   if (new_length != length) retained_maps->set_length(new_length);
    5102       12145 : }
    5103             : 
    5104           0 : void Heap::FatalProcessOutOfMemory(const char* location) {
    5105           0 :   v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
    5106             : }
    5107             : 
    5108             : #ifdef DEBUG
    5109             : 
    5110             : class PrintHandleVisitor : public RootVisitor {
    5111             :  public:
    5112             :   void VisitRootPointers(Root root, const char* description,
    5113             :                          FullObjectSlot start, FullObjectSlot end) override {
    5114             :     for (FullObjectSlot p = start; p < end; ++p)
    5115             :       PrintF("  handle %p to %p\n", p.ToVoidPtr(),
    5116             :              reinterpret_cast<void*>((*p).ptr()));
    5117             :   }
    5118             : };
    5119             : 
    5120             : 
    5121             : void Heap::PrintHandles() {
    5122             :   PrintF("Handles:\n");
    5123             :   PrintHandleVisitor v;
    5124             :   isolate_->handle_scope_implementer()->Iterate(&v);
    5125             : }
    5126             : 
    5127             : #endif
    5128             : 
    5129             : class CheckHandleCountVisitor : public RootVisitor {
    5130             :  public:
    5131           0 :   CheckHandleCountVisitor() : handle_count_(0) {}
    5132           0 :   ~CheckHandleCountVisitor() override {
    5133           0 :     CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
    5134           0 :   }
    5135           0 :   void VisitRootPointers(Root root, const char* description,
    5136             :                          FullObjectSlot start, FullObjectSlot end) override {
    5137           0 :     handle_count_ += end - start;
    5138           0 :   }
    5139             : 
    5140             :  private:
    5141             :   ptrdiff_t handle_count_;
    5142             : };
    5143             : 
    5144             : 
    5145           0 : void Heap::CheckHandleCount() {
    5146             :   CheckHandleCountVisitor v;
    5147           0 :   isolate_->handle_scope_implementer()->Iterate(&v);
    5148           0 : }
    5149             : 
    5150       62554 : Address* Heap::store_buffer_top_address() {
    5151       62554 :   return store_buffer()->top_address();
    5152             : }
    5153             : 
    5154             : // static
    5155         112 : intptr_t Heap::store_buffer_mask_constant() {
    5156         112 :   return StoreBuffer::kStoreBufferMask;
    5157             : }
    5158             : 
    5159             : // static
    5160       62558 : Address Heap::store_buffer_overflow_function_address() {
    5161       62558 :   return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
    5162             : }
    5163             : 
    5164        5396 : void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
    5165             :   DCHECK(!IsLargeObject(object));
    5166             :   Page* page = Page::FromAddress(slot.address());
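                     :   // Only slots on old-space pages are recorded in the store buffer; slots
                     :   // on young-generation pages were never recorded and need no clearing.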
    5167        9734 :   if (!page->InYoungGeneration()) {
    5168             :     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5169             :     store_buffer()->DeleteEntry(slot.address());
    5170             :   }
    5171        5396 : }
    5172             : 
    5173             : #ifdef DEBUG
    5174             : void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
    5175             :   DCHECK(!IsLargeObject(object));
    5176             :   if (InYoungGeneration(object)) return;
    5177             :   Page* page = Page::FromAddress(slot.address());
    5178             :   DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5179             :   store_buffer()->MoveAllEntriesToRememberedSet();
    5180             :   CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
    5181             :   // Old to old slots are filtered with invalidated slots.
    5182             :   CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
    5183             :                 page->RegisteredObjectWithInvalidatedSlots(object));
    5184             : }
    5185             : #endif
    5186             : 
    5187      283898 : void Heap::ClearRecordedSlotRange(Address start, Address end) {
    5188             :   Page* page = Page::FromAddress(start);
    5189             :   DCHECK(!page->IsLargePage());
    5190     2047308 :   if (!page->InYoungGeneration()) {
    5191             :     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5192             :     store_buffer()->DeleteEntry(start, end);
    5193             :   }
    5194      283898 : }
    5195             : 
    5196    27358884 : PagedSpace* PagedSpaces::next() {
    5197    27358884 :   switch (counter_++) {
    5198             :     case RO_SPACE:
    5199             :       // skip NEW_SPACE
    5200     5208303 :       counter_++;
    5201     5208303 :       return heap_->read_only_space();
    5202             :     case OLD_SPACE:
    5203     5537659 :       return heap_->old_space();
    5204             :     case CODE_SPACE:
    5205     5537660 :       return heap_->code_space();
    5206             :     case MAP_SPACE:
    5207     5537660 :       return heap_->map_space();
    5208             :     default:
    5209             :       return nullptr;
    5210             :   }
    5211             : }
    5212             : 
    5213      219177 : SpaceIterator::SpaceIterator(Heap* heap)
    5214      227052 :     : heap_(heap), current_space_(FIRST_SPACE - 1) {}
    5215             : 
    5216             : SpaceIterator::~SpaceIterator() = default;
    5217             : 
    5218     1972593 : bool SpaceIterator::has_next() {
    5219             :   // Iterate until no more spaces.
    5220     2035593 :   return current_space_ != LAST_SPACE;
    5221             : }
    5222             : 
    5223     1753416 : Space* SpaceIterator::next() {
    5224             :   DCHECK(has_next());
    5225     8958736 :   return heap_->space(++current_space_);
    5226             : }
    5227             : 
    5228             : 
    5229        1290 : class HeapObjectsFilter {
    5230             :  public:
    5231        1290 :   virtual ~HeapObjectsFilter() = default;
    5232             :   virtual bool SkipObject(HeapObject object) = 0;
    5233             : };
    5234             : 
    5235             : 
    5236             : class UnreachableObjectsFilter : public HeapObjectsFilter {
    5237             :  public:
    5238        1290 :   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
    5239        1290 :     MarkReachableObjects();
    5240             :   }
    5241             : 
    5242        3870 :   ~UnreachableObjectsFilter() override {
    5243       11582 :     for (auto it : reachable_) {
    5244       20584 :       delete it.second;
    5245             :       it.second = nullptr;
    5246             :     }
    5247        2580 :   }
    5248             : 
    5249    11021762 :   bool SkipObject(HeapObject object) override {
    5250    11021762 :     if (object->IsFiller()) return true;
    5251    11021762 :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5252    11021762 :     if (reachable_.count(chunk) == 0) return true;
    5253    22043362 :     return reachable_[chunk]->count(object) == 0;
    5254             :   }
    5255             : 
    5256             :  private:
    5257    55844173 :   bool MarkAsReachable(HeapObject object) {
    5258    55844173 :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5259    55844173 :     if (reachable_.count(chunk) == 0) {
    5260       20584 :       reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
    5261             :     }
    5262   111688346 :     if (reachable_[chunk]->count(object)) return false;
    5263    10479419 :     reachable_[chunk]->insert(object);
    5264    10479419 :     return true;
    5265             :   }
    5266             : 
    5267        2580 :   class MarkingVisitor : public ObjectVisitor, public RootVisitor {
    5268             :    public:
    5269             :     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
    5270        1290 :         : filter_(filter) {}
    5271             : 
    5272    22522178 :     void VisitPointers(HeapObject host, ObjectSlot start,
    5273             :                        ObjectSlot end) override {
    5274    22522178 :       MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
    5275    22522178 :     }
    5276             : 
    5277     1205065 :     void VisitPointers(HeapObject host, MaybeObjectSlot start,
    5278             :                        MaybeObjectSlot end) final {
    5279     1205065 :       MarkPointers(start, end);
    5280     1205065 :     }
    5281             : 
    5282        9030 :     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    5283        9030 :       Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    5284             :       MarkHeapObject(target);
    5285        9030 :     }
    5286       40170 :     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    5287             :       MarkHeapObject(rinfo->target_object());
    5288       40170 :     }
    5289             : 
    5290     3993063 :     void VisitRootPointers(Root root, const char* description,
    5291             :                            FullObjectSlot start, FullObjectSlot end) override {
    5292             :       MarkPointersImpl(start, end);
    5293     3993063 :     }
    5294             : 
    5295        1290 :     void TransitiveClosure() {
    5296    20960128 :       while (!marking_stack_.empty()) {
    5297    10479419 :         HeapObject obj = marking_stack_.back();
    5298             :         marking_stack_.pop_back();
    5299    10479419 :         obj->Iterate(this);
    5300             :       }
    5301        1290 :     }
    5302             : 
    5303             :    private:
    5304    23727243 :     void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
    5305             :       MarkPointersImpl(start, end);
    5306    23727243 :     }
    5307             : 
    5308             :     template <typename TSlot>
    5309             :     V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
    5310             :       // Treat weak references as strong.
    5311    92975628 :       for (TSlot p = start; p < end; ++p) {
    5312             :         typename TSlot::TObject object = *p;
    5313             :         HeapObject heap_object;
    5314    65255322 :         if (object.GetHeapObject(&heap_object)) {
    5315             :           MarkHeapObject(heap_object);
    5316             :         }
    5317             :       }
    5318             :     }
    5319             : 
    5320             :     V8_INLINE void MarkHeapObject(HeapObject heap_object) {
    5321    55844173 :       if (filter_->MarkAsReachable(heap_object)) {
    5322    10479419 :         marking_stack_.push_back(heap_object);
    5323             :       }
    5324             :     }
    5325             : 
    5326             :     UnreachableObjectsFilter* filter_;
    5327             :     std::vector<HeapObject> marking_stack_;
    5328             :   };
    5329             : 
    5330             :   friend class MarkingVisitor;
    5331             : 
    5332        1290 :   void MarkReachableObjects() {
    5333             :     MarkingVisitor visitor(this);
    5334        1290 :     heap_->IterateRoots(&visitor, VISIT_ALL);
    5335        1290 :     visitor.TransitiveClosure();
    5336        1290 :   }
    5337             : 
    5338             :   Heap* heap_;
    5339             :   DisallowHeapAllocation no_allocation_;
    5340             :   std::unordered_map<MemoryChunk*,
    5341             :                      std::unordered_set<HeapObject, Object::Hasher>*>
    5342             :       reachable_;
    5343             : };
    5344             : 
    5345        7875 : HeapIterator::HeapIterator(Heap* heap,
    5346             :                            HeapIterator::HeapObjectsFiltering filtering)
    5347             :     : heap_(heap),
    5348             :       filtering_(filtering),
    5349             :       filter_(nullptr),
    5350             :       space_iterator_(nullptr),
    5351        7875 :       object_iterator_(nullptr) {
    5352             :   heap_->MakeHeapIterable();
    5353             :   // Start the iteration.
    5354       15750 :   space_iterator_ = new SpaceIterator(heap_);
    5355        7875 :   switch (filtering_) {
    5356             :     case kFilterUnreachable:
    5357        2580 :       filter_ = new UnreachableObjectsFilter(heap_);
    5358        1290 :       break;
    5359             :     default:
    5360             :       break;
    5361             :   }
    5362       23625 :   object_iterator_ = space_iterator_->next()->GetObjectIterator();
    5363        7875 : }
    5364             : 
    5365             : 
    5366       15750 : HeapIterator::~HeapIterator() {
    5367             : #ifdef DEBUG
    5368             :   // Assert that in filtering mode we have iterated through all
    5369             :   // objects. Otherwise, the heap will be left in an inconsistent state.
    5370             :   if (filtering_ != kNoFiltering) {
    5371             :     DCHECK_NULL(object_iterator_);
    5372             :   }
    5373             : #endif
    5374        7875 :   delete space_iterator_;
    5375        7875 :   delete filter_;
    5376        7875 : }
    5377             : 
    5378    90487254 : HeapObject HeapIterator::next() {
    5379    90487254 :   if (filter_ == nullptr) return NextObject();
    5380             : 
    5381    10480709 :   HeapObject obj = NextObject();
    5382    11023052 :   while (!obj.is_null() && (filter_->SkipObject(obj))) obj = NextObject();
    5383    10480709 :   return obj;
    5384             : }
    5385             : 
    5386    91029597 : HeapObject HeapIterator::NextObject() {
    5387             :   // No iterator means we are done.
    5388    91029597 :   if (object_iterator_.get() == nullptr) return HeapObject();
    5389             : 
    5390    91029597 :   HeapObject obj = object_iterator_.get()->Next();
    5391    91029597 :   if (!obj.is_null()) {
    5392             :     // If the current iterator has more objects we are fine.
    5393    90991281 :     return obj;
    5394             :   } else {
    5395             :     // Go through the spaces looking for one that has objects.
    5396      126000 :     while (space_iterator_->has_next()) {
    5397      110250 :       object_iterator_ = space_iterator_->next()->GetObjectIterator();
    5398       55125 :       obj = object_iterator_.get()->Next();
    5399       55125 :       if (!obj.is_null()) {
    5400       30441 :         return obj;
    5401             :       }
    5402             :     }
    5403             :   }
    5404             :   // Done with the last space.
    5405             :   object_iterator_.reset(nullptr);
    5406        7875 :   return HeapObject();
    5407             : }
    5408             : 
    5409       94924 : void Heap::UpdateTotalGCTime(double duration) {
    5410       94924 :   if (FLAG_trace_gc_verbose) {
    5411           0 :     total_gc_time_ms_ += duration;
    5412             :   }
    5413       94924 : }
    5414             : 
    5415       68846 : void Heap::ExternalStringTable::CleanUpYoung() {
    5416             :   int last = 0;
    5417       68846 :   Isolate* isolate = heap_->isolate();
    5418       69324 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    5419         239 :     Object o = young_strings_[i];
    5420         239 :     if (o->IsTheHole(isolate)) {
    5421         227 :       continue;
    5422             :     }
    5423             :     // The real external string is already in one of these vectors and was or
    5424             :     // will be processed. Re-processing it will add a duplicate to the vector.
    5425          12 :     if (o->IsThinString()) continue;
    5426             :     DCHECK(o->IsExternalString());
    5427          12 :     if (InYoungGeneration(o)) {
    5428          24 :       young_strings_[last++] = o;
    5429             :     } else {
    5430           0 :       old_strings_.push_back(o);
    5431             :     }
    5432             :   }
    5433       68846 :   young_strings_.resize(last);
    5434       68846 : }
    5435             : 
    5436       68846 : void Heap::ExternalStringTable::CleanUpAll() {
    5437       68846 :   CleanUpYoung();
    5438             :   int last = 0;
    5439       68846 :   Isolate* isolate = heap_->isolate();
    5440      276486 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    5441      103820 :     Object o = old_strings_[i];
    5442      103820 :     if (o->IsTheHole(isolate)) {
    5443             :       continue;
    5444             :     }
    5445             :     // The real external string is already in one of these vectors and was or
    5446             :     // will be processed. Re-processing it will add a duplicate to the vector.
    5447      102409 :     if (o->IsThinString()) continue;
    5448             :     DCHECK(o->IsExternalString());
    5449             :     DCHECK(!InYoungGeneration(o));
    5450      204818 :     old_strings_[last++] = o;
    5451             :   }
    5452       68846 :   old_strings_.resize(last);
    5453             : #ifdef VERIFY_HEAP
    5454             :   if (FLAG_verify_heap) {
    5455             :     Verify();
    5456             :   }
    5457             : #endif
    5458       68846 : }
    5459             : 
    5460       62426 : void Heap::ExternalStringTable::TearDown() {
    5461       62664 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    5462         119 :     Object o = young_strings_[i];
    5463             :     // Don't finalize thin strings.
    5464         119 :     if (o->IsThinString()) continue;
    5465         110 :     heap_->FinalizeExternalString(ExternalString::cast(o));
    5466             :   }
    5467             :   young_strings_.clear();
    5468      234694 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    5469       86133 :     Object o = old_strings_[i];
    5470             :     // Don't finalize thin strings.
    5471       86133 :     if (o->IsThinString()) continue;
    5472       86133 :     heap_->FinalizeExternalString(ExternalString::cast(o));
    5473             :   }
    5474             :   old_strings_.clear();
    5475       62427 : }
    5476             : 
    5477             : 
    5478      965060 : void Heap::RememberUnmappedPage(Address page, bool compacted) {
    5479             :   // Tag the page pointer to make it findable in the dump file.
    5480      965060 :   if (compacted) {
    5481        9731 :     page ^= 0xC1EAD & (Page::kPageSize - 1);  // Cleared.
    5482             :   } else {
    5483      955329 :     page ^= 0x1D1ED & (Page::kPageSize - 1);  // I died.
    5484             :   }
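                     :   // The tagged addresses of the last kRememberedUnmappedPages unmapped
                     :   // pages are kept in a fixed-size ring buffer.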
    5485     1027501 :   remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
    5486     1027501 :   remembered_unmapped_pages_index_++;
    5487     1027501 :   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
    5488      965060 : }
    5489             : 
    5490     3418557 : void Heap::RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end) {
    5491     3418557 :   StrongRootsList* list = new StrongRootsList();
    5492     3418566 :   list->next = strong_roots_list_;
    5493     3418566 :   list->start = start;
    5494     3418566 :   list->end = end;
    5495     3418566 :   strong_roots_list_ = list;
    5496     3418566 : }
    5497             : 
    5498     3418555 : void Heap::UnregisterStrongRoots(FullObjectSlot start) {
    5499             :   StrongRootsList* prev = nullptr;
    5500     3418555 :   StrongRootsList* list = strong_roots_list_;
    5501    10293727 :   while (list != nullptr) {
    5502     6875173 :     StrongRootsList* next = list->next;
    5503     6875173 :     if (list->start == start) {
    5504     3418552 :       if (prev) {
    5505         773 :         prev->next = next;
    5506             :       } else {
    5507     3417779 :         strong_roots_list_ = next;
    5508             :       }
    5509     3418552 :       delete list;
    5510             :     } else {
    5511             :       prev = list;
    5512             :     }
    5513             :     list = next;
    5514             :   }
    5515     3418554 : }
    5516             : 
    5517          56 : void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
    5518             :   set_builtins_constants_table(cache);
    5519          56 : }
    5520             : 
    5521          56 : void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
    5522             :   DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code->builtin_index());
    5523             :   set_interpreter_entry_trampoline_for_profiling(code);
    5524          56 : }
    5525             : 
    5526         208 : void Heap::AddDirtyJSFinalizationGroup(
    5527             :     JSFinalizationGroup finalization_group,
    5528             :     std::function<void(HeapObject object, ObjectSlot slot, Object target)>
    5529             :         gc_notify_updated_slot) {
    5530             :   DCHECK(dirty_js_finalization_groups()->IsUndefined(isolate()) ||
    5531             :          dirty_js_finalization_groups()->IsJSFinalizationGroup());
    5532             :   DCHECK(finalization_group->next()->IsUndefined(isolate()));
    5533             :   DCHECK(!finalization_group->scheduled_for_cleanup());
    5534         208 :   finalization_group->set_scheduled_for_cleanup(true);
    5535         208 :   finalization_group->set_next(dirty_js_finalization_groups());
    5536             :   gc_notify_updated_slot(
    5537             :       finalization_group,
    5538             :       finalization_group.RawField(JSFinalizationGroup::kNextOffset),
    5539             :       dirty_js_finalization_groups());
    5540             :   set_dirty_js_finalization_groups(finalization_group);
    5541             :   // Roots are rescanned after objects are moved, so no need to record a slot
    5542             :   // for the root pointing to the first JSFinalizationGroup.
    5543         208 : }
    5544             : 
    5545         172 : void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
    5546             :   DCHECK(FLAG_harmony_weak_refs);
    5547             :   DCHECK(weak_refs_keep_during_job()->IsUndefined() ||
    5548             :          weak_refs_keep_during_job()->IsOrderedHashSet());
    5549             :   Handle<OrderedHashSet> table;
    5550         172 :   if (weak_refs_keep_during_job()->IsUndefined(isolate())) {
    5551          82 :     table = isolate()->factory()->NewOrderedHashSet();
    5552             :   } else {
    5553             :     table =
    5554             :         handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
    5555             :   }
    5556         172 :   table = OrderedHashSet::Add(isolate(), table, target);
    5557             :   set_weak_refs_keep_during_job(*table);
    5558         172 : }
    5559             : 
    5560      668313 : void Heap::ClearKeepDuringJobSet() {
    5561             :   set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
    5562      668313 : }
    5563             : 
    5564           0 : size_t Heap::NumberOfTrackedHeapObjectTypes() {
    5565           0 :   return ObjectStats::OBJECT_STATS_COUNT;
    5566             : }
    5567             : 
    5568             : 
    5569           0 : size_t Heap::ObjectCountAtLastGC(size_t index) {
    5570           0 :   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    5571             :     return 0;
    5572           0 :   return live_object_stats_->object_count_last_gc(index);
    5573             : }
    5574             : 
    5575             : 
    5576           0 : size_t Heap::ObjectSizeAtLastGC(size_t index) {
    5577           0 :   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    5578             :     return 0;
    5579           0 :   return live_object_stats_->object_size_last_gc(index);
    5580             : }
    5581             : 
    5582             : 
    5583           0 : bool Heap::GetObjectTypeName(size_t index, const char** object_type,
    5584             :                              const char** object_sub_type) {
    5585           0 :   if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
    5586             : 
    5587           0 :   switch (static_cast<int>(index)) {
    5588             : #define COMPARE_AND_RETURN_NAME(name) \
    5589             :   case name:                          \
    5590             :     *object_type = #name;             \
    5591             :     *object_sub_type = "";            \
    5592             :     return true;
    5593           0 :     INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
    5594             : #undef COMPARE_AND_RETURN_NAME
    5595             : 
    5596             : #define COMPARE_AND_RETURN_NAME(name)                       \
    5597             :   case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
    5598             :     *object_type = #name;                                   \
    5599             :     *object_sub_type = "";                                  \
    5600             :     return true;
    5601           0 :     VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
    5602             : #undef COMPARE_AND_RETURN_NAME
    5603             :   }
    5604             :   return false;
    5605             : }
    5606             : 
    5607         246 : size_t Heap::NumberOfNativeContexts() {
    5608             :   int result = 0;
    5609             :   Object context = native_contexts_list();
    5610        2422 :   while (!context->IsUndefined(isolate())) {
    5611        1088 :     ++result;
    5612        1088 :     Context native_context = Context::cast(context);
    5613        1088 :     context = native_context->next_context_link();
    5614             :   }
    5615         246 :   return result;
    5616             : }
    5617             : 
    5618         246 : size_t Heap::NumberOfDetachedContexts() {
    5619             :   // The detached_contexts() array has two entries per detached context.
    5620         246 :   return detached_contexts()->length() / 2;
    5621             : }
    5622             : 
    5623           0 : void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
    5624             :                                           ObjectSlot end) {
    5625           0 :   VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    5626           0 : }
    5627             : 
    5628           0 : void VerifyPointersVisitor::VisitPointers(HeapObject host,
    5629             :                                           MaybeObjectSlot start,
    5630             :                                           MaybeObjectSlot end) {
    5631           0 :   VerifyPointers(host, start, end);
    5632           0 : }
    5633             : 
    5634           0 : void VerifyPointersVisitor::VisitRootPointers(Root root,
    5635             :                                               const char* description,
    5636             :                                               FullObjectSlot start,
    5637             :                                               FullObjectSlot end) {
    5638             :   VerifyPointersImpl(start, end);
    5639           0 : }
    5640             : 
    5641             : void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
    5642           0 :   CHECK(heap_->Contains(heap_object));
    5643           0 :   CHECK(heap_object->map()->IsMap());
    5644             : }
    5645             : 
    5646             : template <typename TSlot>
    5647             : void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
    5648           0 :   for (TSlot slot = start; slot < end; ++slot) {
    5649             :     typename TSlot::TObject object = *slot;
    5650             :     HeapObject heap_object;
    5651           0 :     if (object.GetHeapObject(&heap_object)) {
    5652             :       VerifyHeapObjectImpl(heap_object);
    5653             :     } else {
    5654           0 :       CHECK(object->IsSmi() || object->IsCleared());
    5655             :     }
    5656             :   }
    5657             : }
    5658             : 
    5659           0 : void VerifyPointersVisitor::VerifyPointers(HeapObject host,
    5660             :                                            MaybeObjectSlot start,
    5661             :                                            MaybeObjectSlot end) {
    5662             :   // If this DCHECK fires then you probably added a pointer field
    5663             :   // to one of the objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
    5664             :   // this by moving that object to POINTER_VISITOR_ID_LIST.
    5665             :   DCHECK_EQ(ObjectFields::kMaybePointers,
    5666             :             Map::ObjectFieldsFrom(host->map()->visitor_id()));
    5667             :   VerifyPointersImpl(start, end);
    5668           0 : }
    5669             : 
    5670           0 : void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
    5671           0 :   Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    5672             :   VerifyHeapObjectImpl(target);
    5673           0 : }
    5674             : 
    5675           0 : void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
    5676             :   VerifyHeapObjectImpl(rinfo->target_object());
    5677           0 : }
    5678             : 
    5679           0 : void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
    5680             :                                           FullObjectSlot start,
    5681             :                                           FullObjectSlot end) {
    5682           0 :   for (FullObjectSlot current = start; current < end; ++current) {
    5683           0 :     CHECK((*current)->IsSmi());
    5684             :   }
    5685           0 : }
    5686             : 
    5687           0 : bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
    5688             :   // Object migration is governed by the following rules:
    5689             :   //
    5690             :   // 1) Objects in new-space can be migrated to the old space
    5691             :   //    that matches their target space or they stay in new-space.
    5692             :   // 2) Objects in old-space stay in the same space when migrating.
    5693             :   // 3) Fillers (two or more words) can migrate due to left-trimming of
    5694             :   //    fixed arrays in new-space or old space.
    5695             :   // 4) Fillers (one word) can never migrate; they are explicitly skipped
    5696             :   //    by incremental marking to prevent an invalid pattern.
    5697             :   //
    5698             :   // Since this function is used for debugging only, we do not place
    5699             :   // asserts here, but check everything explicitly.
    5700           0 :   if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
    5701             :   InstanceType type = obj->map()->instance_type();
    5702             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    5703             :   AllocationSpace src = chunk->owner()->identity();
    5704           0 :   switch (src) {
    5705             :     case NEW_SPACE:
    5706           0 :       return dst == NEW_SPACE || dst == OLD_SPACE;
    5707             :     case OLD_SPACE:
    5708           0 :       return dst == OLD_SPACE;
    5709             :     case CODE_SPACE:
    5710           0 :       return dst == CODE_SPACE && type == CODE_TYPE;
    5711             :     case MAP_SPACE:
    5712             :     case LO_SPACE:
    5713             :     case CODE_LO_SPACE:
    5714             :     case NEW_LO_SPACE:
    5715             :     case RO_SPACE:
    5716             :       return false;
    5717             :   }
    5718           0 :   UNREACHABLE();
    5719             : }
    5720             : 
    5721           0 : void Heap::CreateObjectStats() {
    5722           0 :   if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
    5723           0 :   if (!live_object_stats_) {
    5724           0 :     live_object_stats_.reset(new ObjectStats(this));
    5725             :   }
    5726           0 :   if (!dead_object_stats_) {
    5727           0 :     dead_object_stats_.reset(new ObjectStats(this));
    5728             :   }
    5729             : }
    5730             : 
    5731    23423353 : void AllocationObserver::AllocationStep(int bytes_allocated,
    5732             :                                         Address soon_object, size_t size) {
    5733             :   DCHECK_GE(bytes_allocated, 0);
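                     :   // Count down the bytes allocated since the last step; once step_size_
                     :   // bytes have been observed, notify the observer and fetch the next step.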
    5734    23423353 :   bytes_to_next_step_ -= bytes_allocated;
    5735    23423353 :   if (bytes_to_next_step_ <= 0) {
    5736      210741 :     Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
    5737      210742 :     step_size_ = GetNextStepSize();
    5738      210742 :     bytes_to_next_step_ = step_size_;
    5739             :   }
    5740             :   DCHECK_GE(bytes_to_next_step_, 0);
    5741    23423354 : }
    5742             : 
    5743             : namespace {
    5744             : 
    5745     2267104 : Map GcSafeMapOfCodeSpaceObject(HeapObject object) {
    5746             :   MapWord map_word = object->map_word();
    5747             :   return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
    5748     4534208 :                                         : map_word.ToMap();
    5749             : }
    5750             : 
    5751             : int GcSafeSizeOfCodeSpaceObject(HeapObject object) {
    5752     2267104 :   return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
    5753             : }
    5754             : 
    5755             : Code GcSafeCastToCode(Heap* heap, HeapObject object, Address inner_pointer) {
    5756             :   Code code = Code::unchecked_cast(object);
    5757             :   DCHECK(!code.is_null());
    5758             :   DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
    5759             :   return code;
    5760             : }
    5761             : 
    5762             : }  // namespace
    5763             : 
    5764           0 : bool Heap::GcSafeCodeContains(Code code, Address addr) {
    5765           0 :   Map map = GcSafeMapOfCodeSpaceObject(code);
    5766             :   DCHECK(map == ReadOnlyRoots(this).code_map());
    5767           0 :   if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
    5768             :   Address start = code->address();
    5769           0 :   Address end = code->address() + code->SizeFromMap(map);
    5770           0 :   return start <= addr && addr < end;
    5771             : }
    5772             : 
    5773     1093533 : Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
    5774     1093533 :   Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
    5775     1093534 :   if (!code.is_null()) return code;
    5776             : 
    5777             :   // Check if the inner pointer points into a large object chunk.
    5778      537690 :   LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
    5779      537690 :   if (large_page != nullptr) {
    5780             :     return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
    5781             :   }
    5782             : 
    5783             :   DCHECK(code_space()->Contains(inner_pointer));
    5784             : 
    5785             :   // Iterate through the page until we reach the end or find an object starting
    5786             :   // after the inner pointer.
    5787             :   Page* page = Page::FromAddress(inner_pointer);
    5788             :   DCHECK_EQ(page->owner(), code_space());
    5789      532637 :   mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);
    5790             : 
    5791             :   Address addr = page->skip_list()->StartFor(inner_pointer);
    5792             :   Address top = code_space()->top();
    5793             :   Address limit = code_space()->limit();
    5794             : 
    5795             :   while (true) {
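                     :     // When iteration reaches the allocation top, skip ahead to the limit of
                     :     // the current linear allocation area; no objects live in that gap.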
    5796     2268621 :     if (addr == top && addr != limit) {
    5797             :       addr = limit;
    5798             :       continue;
    5799             :     }
    5800             : 
    5801             :     HeapObject obj = HeapObject::FromAddress(addr);
    5802             :     int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
    5803     2267108 :     Address next_addr = addr + obj_size;
    5804     2267108 :     if (next_addr > inner_pointer) {
    5805             :       return GcSafeCastToCode(this, obj, inner_pointer);
    5806             :     }
    5807             :     addr = next_addr;
    5808             :   }
    5809             : }
    5810             : 
    5811          34 : void Heap::WriteBarrierForCodeSlow(Code code) {
    5812          39 :   for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
    5813           5 :        !it.done(); it.next()) {
    5814           5 :     GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    5815           5 :     MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    5816             :   }
    5817          34 : }
    5818             : 
    5819           0 : void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
    5820             :                                    HeapObject value) {
    5821             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5822             :   heap->store_buffer()->InsertEntry(slot);
    5823           0 : }
    5824             : 
    5825           2 : void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
    5826             :   DCHECK(ObjectInYoungGeneration(HeapObjectSlot(slot).ToHeapObject()));
    5827             :   int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
    5828           2 :   int entry = EphemeronHashTable::IndexToEntry(slot_index);
    5829             :   auto it =
    5830           6 :       ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
    5831             :   it.first->second.insert(entry);
    5832           2 : }
    5833             : 
    5834        1416 : void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
    5835             :                                             Address key_slot_address,
    5836             :                                             Isolate* isolate) {
    5837             :   EphemeronHashTable table = EphemeronHashTable::cast(Object(raw_object));
    5838             :   MaybeObjectSlot key_slot(key_slot_address);
    5839             :   MaybeObject maybe_key = *key_slot;
    5840             :   HeapObject key;
    5841        1416 :   if (!maybe_key.GetHeapObject(&key)) return;
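                     :   // An old-generation table holding a young-generation key is recorded in
                     :   // the ephemeron remembered set.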
    5842        1418 :   if (!ObjectInYoungGeneration(table) && ObjectInYoungGeneration(key)) {
    5843           2 :     isolate->heap()->RecordEphemeronKeyWrite(table, key_slot_address);
    5844             :   }
    5845             :   isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(table, key_slot,
    5846             :                                                                maybe_key);
    5847             : }
    5848             : 
    5849         998 : void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
    5850             :                                               int offset, int length) {
    5851     1401782 :   for (int i = 0; i < length; i++) {
    5852     1400784 :     if (!InYoungGeneration(array->get(offset + i))) continue;
    5853             :     heap->store_buffer()->InsertEntry(
    5854             :         array->RawFieldOfElementAt(offset + i).address());
    5855             :   }
    5856         998 : }
    5857             : 
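The element-range barrier above scans a span of array slots and records only those that point into the young generation. A minimal sketch of that filtering idea, with hypothetical names and plain C++ types instead of V8's FixedArray and StoreBuffer:

    // Only slots holding young pointers are recorded, keeping the
    // old-to-new remembered set small.
    #include <vector>

    struct StoreBuffer {
      std::vector<void**> slots;  // addresses of old-space slots that point to new space
      void InsertEntry(void** slot) { slots.push_back(slot); }
    };

    // Placeholder for the collector's real check, which would test a bit on
    // the pointee's page header; only the slot-recording logic is sketched.
    bool InYoungGeneration(const void* object) {
      return object != nullptr;  // stand-in predicate for this sketch
    }

    void GenerationalBarrierForRange(StoreBuffer* buffer, void** elements,
                                     int offset, int length) {
      for (int i = 0; i < length; i++) {
        void** slot = &elements[offset + i];
        if (!InYoungGeneration(*slot)) continue;
        buffer->InsertEntry(slot);
      }
    }
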
    5858      220061 : void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
    5859             :                                           HeapObject object) {
    5860             :   DCHECK(InYoungGeneration(object));
    5861             :   Page* source_page = Page::FromHeapObject(host);
    5862             :   RelocInfo::Mode rmode = rinfo->rmode();
    5863             :   Address addr = rinfo->pc();
    5864             :   SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
    5865      220061 :   if (rinfo->IsInConstantPool()) {
    5866             :     addr = rinfo->constant_pool_entry_address();
    5867             :     if (RelocInfo::IsCodeTargetMode(rmode)) {
    5868             :       slot_type = CODE_ENTRY_SLOT;
    5869             :     } else {
    5870             :       DCHECK(RelocInfo::IsEmbeddedObject(rmode));
    5871             :       slot_type = OBJECT_SLOT;
    5872             :     }
    5873             :   }
    5874      440122 :   uintptr_t offset = addr - source_page->address();
    5875             :   DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
    5876      220061 :   RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
    5877      220061 :                                          static_cast<uint32_t>(offset));
    5878      220061 : }
    5879             : 
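The code barrier above does not store a raw slot address; it stores a slot type plus the slot's offset from its page start, so a later scavenge can decode the slot correctly before updating it. A self-contained sketch of that per-page typed-slot bookkeeping (hypothetical layout, not V8's RememberedSet or TypedSlotSet):

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    enum SlotType { kObjectSlot, kCodeEntrySlot };

    struct TypedSlot {
      SlotType type;
      uint32_t offset;  // slot address minus page start; must fit in the page
    };

    class TypedSlotSet {
     public:
      void Insert(uintptr_t page_start, SlotType type, uintptr_t slot_address) {
        uint32_t offset = static_cast<uint32_t>(slot_address - page_start);
        slots_[page_start].push_back({type, offset});
      }

     private:
      // One bucket of typed slots per page, keyed by the page's base address.
      std::unordered_map<uintptr_t, std::vector<TypedSlot>> slots_;
    };
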
    5880           0 : void Heap::MarkingBarrierSlow(HeapObject object, Address slot,
    5881             :                               HeapObject value) {
    5882             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5883   124707169 :   heap->incremental_marking()->RecordWriteSlow(object, HeapObjectSlot(slot),
    5884   124707252 :                                                value);
    5885           0 : }
    5886             : 
    5887     1863511 : void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
    5888             :   IncrementalMarking::MarkingState* marking_state =
    5889             :       heap->incremental_marking()->marking_state();
    5890     1863511 :   if (!marking_state->IsBlack(object)) {
    5891             :     marking_state->WhiteToGrey(object);
    5892             :     marking_state->GreyToBlack(object);
    5893             :   }
    5894     1863511 :   heap->incremental_marking()->RevisitObject(object);
    5895     1863511 : }
    5896             : 
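The element barrier above pushes a not-yet-black object straight to black and then queues it for revisiting, so the marker rescans its elements even if it was visited before the write. A simplified tri-color model of that behavior (assumed names, not V8's MarkingState or worklists):

    #include <deque>

    enum class Color { kWhite, kGrey, kBlack };

    struct MarkedObject {
      Color color = Color::kWhite;
    };

    struct Marker {
      std::deque<MarkedObject*> revisit_worklist;

      void MarkingBarrierForElements(MarkedObject* object) {
        if (object->color != Color::kBlack) {
          // Collapse white -> grey -> black, mirroring the two transitions in
          // the barrier above.
          object->color = Color::kBlack;
        }
        // Always rescan: the object may already have been visited before the
        // element write happened.
        revisit_worklist.push_back(object);
      }
    };
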
    5897           0 : void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
    5898             :                                      HeapObject object) {
    5899             :   Heap* heap = Heap::FromWritableHeapObject(host);
    5900             :   DCHECK(heap->incremental_marking()->IsMarking());
    5901      271968 :   heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
    5902           0 : }
    5903             : 
    5904     8910690 : void Heap::MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
    5905             :                                                 HeapObject raw_descriptor_array,
    5906             :                                                 int number_of_own_descriptors) {
    5907             :   DCHECK(heap->incremental_marking()->IsMarking());
    5908             :   DescriptorArray descriptor_array =
    5909             :       DescriptorArray::cast(raw_descriptor_array);
    5910             :   int16_t raw_marked = descriptor_array->raw_number_of_marked_descriptors();
    5911     8910690 :   if (NumberOfMarkedDescriptors::decode(heap->mark_compact_collector()->epoch(),
    5912     8910690 :                                         raw_marked) <
    5913             :       number_of_own_descriptors) {
    5914             :     heap->incremental_marking()->VisitDescriptors(host, descriptor_array,
    5915     3650430 :                                                   number_of_own_descriptors);
    5916             :   }
    5917     8910695 : }
    5918             : 
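The descriptor-array barrier above decodes the stored count of already-marked descriptors relative to the collector's current epoch and visits descriptors only when that count lags behind the number the map now owns. The sketch below illustrates the idea with a hypothetical epoch-tagged encoding (the bit layout is an assumption, not V8's NumberOfMarkedDescriptors field): a count recorded in an earlier cycle decodes as zero, forcing a fresh visit.

    #include <cstdint>

    struct MarkedDescriptors {
      static constexpr unsigned kEpochBits = 2;
      static constexpr uint16_t kCountMask = (1u << (16 - kEpochBits)) - 1;

      static uint16_t Encode(unsigned epoch, uint16_t count) {
        unsigned tag = epoch & ((1u << kEpochBits) - 1);
        return static_cast<uint16_t>((tag << (16 - kEpochBits)) |
                                     (count & kCountMask));
      }

      // Returns the recorded count if it was written in |current_epoch|,
      // otherwise 0 (i.e. "nothing marked yet this cycle").
      static uint16_t Decode(unsigned current_epoch, uint16_t raw) {
        unsigned tag = current_epoch & ((1u << kEpochBits) - 1);
        if (static_cast<unsigned>(raw >> (16 - kEpochBits)) != tag) return 0;
        return raw & kCountMask;
      }
    };

    // Visit descriptors only when fewer have been marked in this epoch than
    // the map currently owns.
    inline bool NeedsDescriptorVisit(unsigned epoch, uint16_t raw_marked,
                                     int number_of_own_descriptors) {
      return MarkedDescriptors::Decode(epoch, raw_marked) <
             number_of_own_descriptors;
    }
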
    5919           0 : bool Heap::PageFlagsAreConsistent(HeapObject object) {
    5920             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5921             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5922             :   heap_internals::MemoryChunk* slim_chunk =
    5923             :       heap_internals::MemoryChunk::FromHeapObject(object);
    5924             : 
    5925             :   const bool generation_consistency =
    5926           0 :       chunk->owner()->identity() != NEW_SPACE ||
    5927           0 :       (chunk->InYoungGeneration() && slim_chunk->InYoungGeneration());
    5928             :   const bool marking_consistency =
    5929           0 :       !heap->incremental_marking()->IsMarking() ||
    5930           0 :       (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
    5931             :        slim_chunk->IsMarking());
    5932             : 
    5933           0 :   return generation_consistency && marking_consistency;
    5934             : }
    5935             : 
    5936             : static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
    5937             :                   heap_internals::MemoryChunk::kMarkingBit,
    5938             :               "Incremental marking flag inconsistent");
    5939             : static_assert(MemoryChunk::Flag::FROM_PAGE ==
    5940             :                   heap_internals::MemoryChunk::kFromPageBit,
    5941             :               "From page flag inconsistent");
    5942             : static_assert(MemoryChunk::Flag::TO_PAGE ==
    5943             :                   heap_internals::MemoryChunk::kToPageBit,
    5944             :               "To page flag inconsistent");
    5945             : static_assert(MemoryChunk::kFlagsOffset ==
    5946             :                   heap_internals::MemoryChunk::kFlagsOffset,
    5947             :               "Flag offset inconsistent");
    5948             : static_assert(MemoryChunk::kHeapOffset ==
    5949             :                   heap_internals::MemoryChunk::kHeapOffset,
    5950             :               "Heap offset inconsistent");
    5951             : static_assert(MemoryChunk::kOwnerOffset ==
    5952             :                   heap_internals::MemoryChunk::kOwnerOffset,
    5953             :               "Owner offset inconsistent");
    5954             : 
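The static_asserts above pin the layout of the full MemoryChunk against the minimal heap_internals view that inlined fast paths read directly. A toy illustration of that pattern, with made-up struct names: two views of the same header kept in sync at compile time via offsetof checks.

    #include <cstddef>
    #include <cstdint>

    struct FullChunkHeader {
      uintptr_t flags;
      void* heap;
      void* owner;
      // ... many more fields in the full view ...
    };

    struct SlimChunkHeader {
      uintptr_t flags;
      void* heap;
      void* owner;
    };

    static_assert(offsetof(FullChunkHeader, flags) ==
                      offsetof(SlimChunkHeader, flags),
                  "Flag offset inconsistent");
    static_assert(offsetof(FullChunkHeader, heap) ==
                      offsetof(SlimChunkHeader, heap),
                  "Heap offset inconsistent");
    static_assert(offsetof(FullChunkHeader, owner) ==
                      offsetof(SlimChunkHeader, owner),
                  "Owner offset inconsistent");
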
    5955           5 : void Heap::SetEmbedderStackStateForNextFinalizaton(
    5956             :     EmbedderHeapTracer::EmbedderStackState stack_state) {
    5957             :   local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
    5958           5 :       stack_state);
    5959           5 : }
    5960             : 
    5961             : #ifdef DEBUG
    5962             : void Heap::IncrementObjectCounters() {
    5963             :   isolate_->counters()->objs_since_last_full()->Increment();
    5964             :   isolate_->counters()->objs_since_last_young()->Increment();
    5965             : }
    5966             : #endif  // DEBUG
    5967             : 
    5968             : }  // namespace internal
    5969      122036 : }  // namespace v8

Generated by: LCOV version 1.10