LCOV - code coverage report
Current view: top level - src/heap - heap.cc
Test: app.info
Date: 2019-03-21
                 Hit    Total   Coverage
Lines:          1741     2264     76.9 %
Functions:       239      318     75.2 %

          Line data    Source code
       1             : // Copyright 2012 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/heap.h"
       6             : 
       7             : #include <unordered_map>
       8             : #include <unordered_set>
       9             : 
      10             : #include "src/accessors.h"
      11             : #include "src/api-inl.h"
      12             : #include "src/assembler-inl.h"
      13             : #include "src/base/bits.h"
      14             : #include "src/base/once.h"
      15             : #include "src/base/utils/random-number-generator.h"
      16             : #include "src/bootstrapper.h"
      17             : #include "src/compilation-cache.h"
      18             : #include "src/conversions.h"
      19             : #include "src/debug/debug.h"
      20             : #include "src/deoptimizer.h"
      21             : #include "src/feedback-vector.h"
      22             : #include "src/global-handles.h"
      23             : #include "src/heap/array-buffer-collector.h"
      24             : #include "src/heap/array-buffer-tracker-inl.h"
      25             : #include "src/heap/barrier.h"
      26             : #include "src/heap/code-stats.h"
      27             : #include "src/heap/concurrent-marking.h"
      28             : #include "src/heap/embedder-tracing.h"
      29             : #include "src/heap/gc-idle-time-handler.h"
      30             : #include "src/heap/gc-tracer.h"
      31             : #include "src/heap/heap-controller.h"
      32             : #include "src/heap/heap-write-barrier-inl.h"
      33             : #include "src/heap/incremental-marking.h"
      34             : #include "src/heap/mark-compact-inl.h"
      35             : #include "src/heap/mark-compact.h"
      36             : #include "src/heap/memory-reducer.h"
      37             : #include "src/heap/object-stats.h"
      38             : #include "src/heap/objects-visiting-inl.h"
      39             : #include "src/heap/objects-visiting.h"
      40             : #include "src/heap/read-only-heap.h"
      41             : #include "src/heap/remembered-set.h"
      42             : #include "src/heap/scavenge-job.h"
      43             : #include "src/heap/scavenger-inl.h"
      44             : #include "src/heap/store-buffer.h"
      45             : #include "src/heap/stress-marking-observer.h"
      46             : #include "src/heap/stress-scavenge-observer.h"
      47             : #include "src/heap/sweeper.h"
      48             : #include "src/interpreter/interpreter.h"
      49             : #include "src/log.h"
      50             : #include "src/microtask-queue.h"
      51             : #include "src/objects/data-handler.h"
      52             : #include "src/objects/free-space-inl.h"
      53             : #include "src/objects/hash-table-inl.h"
      54             : #include "src/objects/maybe-object.h"
      55             : #include "src/objects/shared-function-info.h"
      56             : #include "src/objects/slots-inl.h"
      57             : #include "src/regexp/jsregexp.h"
      58             : #include "src/runtime-profiler.h"
      59             : #include "src/snapshot/embedded-data.h"
      60             : #include "src/snapshot/natives.h"
      61             : #include "src/snapshot/serializer-common.h"
      62             : #include "src/snapshot/snapshot.h"
      63             : #include "src/string-stream.h"
      64             : #include "src/tracing/trace-event.h"
      65             : #include "src/unicode-decoder.h"
      66             : #include "src/unicode-inl.h"
      67             : #include "src/utils-inl.h"
      68             : #include "src/utils.h"
      69             : #include "src/v8.h"
      70             : #include "src/v8threads.h"
      71             : #include "src/vm-state-inl.h"
      72             : 
      73             : // Has to be the last include (doesn't have include guards):
      74             : #include "src/objects/object-macros.h"
      75             : 
      76             : namespace v8 {
      77             : namespace internal {
      78             : 
      79             : // These are outside the Heap class so they can be forward-declared
      80             : // in heap-write-barrier-inl.h.
      81           0 : bool Heap_PageFlagsAreConsistent(HeapObject object) {
      82           0 :   return Heap::PageFlagsAreConsistent(object);
      83             : }
      84             : 
      85   107637879 : void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
      86             :                                   HeapObject value) {
      87             :   Heap::GenerationalBarrierSlow(object, slot, value);
      88   107637813 : }
      89             : 
      90   120256610 : void Heap_MarkingBarrierSlow(HeapObject object, Address slot,
      91             :                              HeapObject value) {
      92             :   Heap::MarkingBarrierSlow(object, slot, value);
      93   120256400 : }
      94             : 
      95          34 : void Heap_WriteBarrierForCodeSlow(Code host) {
      96          34 :   Heap::WriteBarrierForCodeSlow(host);
      97          34 : }
      98             : 
      99      220816 : void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
     100             :                                          HeapObject object) {
     101      220821 :   Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
     102      220816 : }
     103             : 
     104      276661 : void Heap_MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
     105             :                                     HeapObject object) {
     106             :   Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
     107      276661 : }
     108             : 
     109         963 : void Heap_GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
     110             :                                              int offset, int length) {
     111         976 :   Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
     112         963 : }
     113             : 
     114         230 : void Heap_MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
     115       16170 :   Heap::MarkingBarrierForElementsSlow(heap, object);
     116         230 : }
     117             : 
     118     8137074 : void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
     119             :                                                HeapObject descriptor_array,
     120             :                                                int number_of_own_descriptors) {
     121             :   Heap::MarkingBarrierForDescriptorArraySlow(heap, host, descriptor_array,
     122     8137074 :                                              number_of_own_descriptors);
     123     8137080 : }
     124             : 
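
The comment above explains why these barriers are free functions rather than
Heap members: a free function can be declared in heap-write-barrier-inl.h with
a single line, without pulling in the full Heap class definition. A minimal
sketch of that pattern; it is illustrative only, not the actual contents of
heap-write-barrier-inl.h, and the GenerationalBarrierFastPath name is
hypothetical:

    // A one-line declaration is enough -- no #include "src/heap/heap.h" needed.
    void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
                                      HeapObject value);

    // The inlined fast path can then fall back to the out-of-line wrapper.
    inline void GenerationalBarrierFastPath(HeapObject object, Address slot,
                                            HeapObject value) {
      // (Real code would first filter out writes that need no barrier.)
      Heap_GenerationalBarrierSlow(object, slot, value);
    }
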
     125          56 : void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
     126             :   DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
     127             :   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
     128          56 : }
     129             : 
     130          56 : void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
     131             :   DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
     132             :   set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
     133          56 : }
     134             : 
     135          56 : void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
     136             :   DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
     137             :   set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
     138          56 : }
     139             : 
     140          56 : void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
     141             :   DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
     142             :   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
     143          56 : }
     144             : 
     145         246 : void Heap::SetSerializedObjects(FixedArray objects) {
     146             :   DCHECK(isolate()->serializer_enabled());
     147             :   set_serialized_objects(objects);
     148         246 : }
     149             : 
     150         196 : void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
     151             :   DCHECK(isolate()->serializer_enabled());
     152             :   set_serialized_global_proxy_sizes(sizes);
     153         196 : }
     154             : 
     155           0 : bool Heap::GCCallbackTuple::operator==(
     156             :     const Heap::GCCallbackTuple& other) const {
     157           0 :   return other.callback == callback && other.data == data;
     158             : }
     159             : 
     160             : Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
     161             :     const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
     162             : 
     163             : struct Heap::StrongRootsList {
     164             :   FullObjectSlot start;
     165             :   FullObjectSlot end;
     166             :   StrongRootsList* next;
     167             : };
     168             : 
     169      123034 : class IdleScavengeObserver : public AllocationObserver {
     170             :  public:
     171             :   IdleScavengeObserver(Heap& heap, intptr_t step_size)
     172       61534 :       : AllocationObserver(step_size), heap_(heap) {}
     173             : 
     174       23459 :   void Step(int bytes_allocated, Address, size_t) override {
     175       23459 :     heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
     176       23459 :   }
     177             : 
     178             :  private:
     179             :   Heap& heap_;
     180             : };
     181             : 
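
IdleScavengeObserver above is an instance of the AllocationObserver pattern:
an observer constructed with a step size and registered on a space has its
Step() hook invoked after roughly that many bytes have been allocated. A
minimal sketch, with a hypothetical observer name and no particular step size:

    class CountingObserver : public AllocationObserver {
     public:
      explicit CountingObserver(intptr_t step_size)
          : AllocationObserver(step_size) {}

      // Called by the owning space after roughly step_size bytes of
      // allocation; bytes_allocated is the amount since the previous step.
      void Step(int bytes_allocated, Address soon_object, size_t size) override {
        steps_++;
      }

      int steps() const { return steps_; }

     private:
      int steps_ = 0;
    };

Such an observer would typically be registered through
Space::AddAllocationObserver (see AddAllocationObserversToAllSpaces further
down in this file) and removed again once it is no longer needed.
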
     182       61533 : Heap::Heap()
     183             :     : isolate_(isolate()),
     184             :       initial_max_old_generation_size_(max_old_generation_size_),
     185             :       initial_max_old_generation_size_threshold_(0),
     186             :       initial_old_generation_size_(max_old_generation_size_ /
     187             :                                    kInitalOldGenerationLimitFactor),
     188             :       memory_pressure_level_(MemoryPressureLevel::kNone),
     189             :       old_generation_allocation_limit_(initial_old_generation_size_),
     190             :       global_pretenuring_feedback_(kInitialFeedbackCapacity),
     191             :       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
     192             :       is_current_gc_forced_(false),
     193      615334 :       external_string_table_(this) {
      194             :   // Ensure max_old_generation_size_ is a multiple of Page::kPageSize.
     195             :   DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
     196             : 
     197             :   set_native_contexts_list(Smi::kZero);
     198             :   set_allocation_sites_list(Smi::kZero);
      199             :   // Put a dummy entry in the remembered pages so we can find the list in
      200             :   // the minidump even if there are no real unmapped pages.
     201             :   RememberUnmappedPage(kNullAddress, false);
     202       61534 : }
     203             : 
     204             : Heap::~Heap() = default;
     205             : 
     206        1261 : size_t Heap::MaxReserved() {
     207     2643621 :   const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
     208     2643621 :   return static_cast<size_t>(2 * max_semi_space_size_ +
     209             :                              kMaxNewLargeObjectSpaceSize +
     210     2643621 :                              max_old_generation_size_);
     211             : }
     212             : 
     213       29808 : size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
     214             :   const size_t old_space_physical_memory_factor = 4;
     215       29808 :   size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
     216             :                                              old_space_physical_memory_factor *
     217       29808 :                                              kPointerMultiplier);
     218             :   return Max(Min(computed_size, HeapController::kMaxSize),
     219       29808 :              HeapController::kMinSize);
     220             : }
     221             : 
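
As a worked example of the computation above (numbers hypothetical): with
8 GB of physical memory and kPointerMultiplier equal to 1, computed_size is
8192 MB / 4 = 2048 MB, and the returned limit is that value clamped into the
[HeapController::kMinSize, HeapController::kMaxSize] range.
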
     222          59 : size_t Heap::Capacity() {
     223          59 :   if (!HasBeenSetUp()) return 0;
     224             : 
     225          59 :   return new_space_->Capacity() + OldGenerationCapacity();
     226             : }
     227             : 
     228     2725754 : size_t Heap::OldGenerationCapacity() {
     229     2725754 :   if (!HasBeenSetUp()) return 0;
     230             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
     231             :   size_t total = 0;
     232    12706586 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     233             :        space = spaces.next()) {
     234    10165262 :     total += space->Capacity();
     235             :   }
     236     2541322 :   return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
     237             : }
     238             : 
     239      694536 : size_t Heap::CommittedOldGenerationMemory() {
     240      694536 :   if (!HasBeenSetUp()) return 0;
     241             : 
     242             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
     243             :   size_t total = 0;
     244     3472679 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     245             :        space = spaces.next()) {
     246     2778143 :     total += space->CommittedMemory();
     247             :   }
     248      694536 :   return total + lo_space_->Size() + code_lo_space_->Size();
     249             : }
     250             : 
     251           0 : size_t Heap::CommittedMemoryOfUnmapper() {
     252           0 :   if (!HasBeenSetUp()) return 0;
     253             : 
     254           0 :   return memory_allocator()->unmapper()->CommittedBufferedMemory();
     255             : }
     256             : 
     257      546602 : size_t Heap::CommittedMemory() {
     258      546602 :   if (!HasBeenSetUp()) return 0;
     259             : 
     260      546603 :   return new_space_->CommittedMemory() + new_lo_space_->Size() +
     261      546603 :          CommittedOldGenerationMemory();
     262             : }
     263             : 
     264             : 
     265         246 : size_t Heap::CommittedPhysicalMemory() {
     266         246 :   if (!HasBeenSetUp()) return 0;
     267             : 
     268             :   size_t total = 0;
     269        4182 :   for (SpaceIterator it(this); it.has_next();) {
     270        1968 :     total += it.next()->CommittedPhysicalMemory();
     271             :   }
     272             : 
     273         246 :   return total;
     274             : }
     275             : 
     276      128799 : size_t Heap::CommittedMemoryExecutable() {
     277      128799 :   if (!HasBeenSetUp()) return 0;
     278             : 
     279      128799 :   return static_cast<size_t>(memory_allocator()->SizeExecutable());
     280             : }
     281             : 
     282             : 
     283           0 : void Heap::UpdateMaximumCommitted() {
     284      251374 :   if (!HasBeenSetUp()) return;
     285             : 
     286      251375 :   const size_t current_committed_memory = CommittedMemory();
     287      251375 :   if (current_committed_memory > maximum_committed_) {
     288       92844 :     maximum_committed_ = current_committed_memory;
     289             :   }
     290             : }
     291             : 
     292         305 : size_t Heap::Available() {
     293         305 :   if (!HasBeenSetUp()) return 0;
     294             : 
     295             :   size_t total = 0;
     296             : 
     297        5185 :   for (SpaceIterator it(this); it.has_next();) {
     298        2440 :     total += it.next()->Available();
     299             :   }
     300             : 
     301         305 :   total += memory_allocator()->Available();
     302         305 :   return total;
     303             : }
     304             : 
     305     2593763 : bool Heap::CanExpandOldGeneration(size_t size) {
     306     2593763 :   if (force_oom_) return false;
     307     2582444 :   if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
      308             :   // The OldGenerationCapacity does not account for compaction spaces used
      309             :   // during evacuation. Ensure that expanding the old generation does not
      310             :   // push the total allocated memory size over the maximum heap size.
     311     5161652 :   return memory_allocator()->Size() + size <= MaxReserved();
     312             : }
     313             : 
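
To make the check above concrete (numbers hypothetical): with a
max_semi_space_size_ of 8 MB and a max_old_generation_size_ of 256 MB,
MaxReserved() is 2 * 8 + 8 + 256 = 280 MB, so a request to expand by size
succeeds only if the old-generation capacity plus size stays within
MaxOldGenerationSize() and the memory allocator's total footprint plus size
stays within that 280 MB bound.
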
     314          15 : bool Heap::HasBeenSetUp() {
     315             :   // We will always have a new space when the heap is set up.
     316     6282446 :   return new_space_ != nullptr;
     317             : }
     318             : 
     319             : 
     320       94928 : GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     321             :                                               const char** reason) {
     322             :   // Is global GC requested?
     323       94928 :   if (space != NEW_SPACE && space != NEW_LO_SPACE) {
     324      146990 :     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
     325       73495 :     *reason = "GC in old space requested";
     326       73495 :     return MARK_COMPACTOR;
     327             :   }
     328             : 
     329       21433 :   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
     330         406 :     *reason = "GC in old space forced by flags";
     331         406 :     return MARK_COMPACTOR;
     332             :   }
     333             : 
     334       21384 :   if (incremental_marking()->NeedsFinalization() &&
     335         357 :       AllocationLimitOvershotByLargeMargin()) {
     336          44 :     *reason = "Incremental marking needs finalization";
     337          44 :     return MARK_COMPACTOR;
     338             :   }
     339             : 
     340             :   // Over-estimate the new space size using capacity to allow some slack.
     341       41966 :   if (!CanExpandOldGeneration(new_space_->TotalCapacity() +
     342       20983 :                               new_lo_space()->Size())) {
     343          10 :     isolate_->counters()
     344             :         ->gc_compactor_caused_by_oldspace_exhaustion()
     345          10 :         ->Increment();
     346          10 :     *reason = "scavenge might not succeed";
     347          10 :     return MARK_COMPACTOR;
     348             :   }
     349             : 
     350             :   // Default
     351       20973 :   *reason = nullptr;
     352       20973 :   return YoungGenerationCollector();
     353             : }
     354             : 
     355           0 : void Heap::SetGCState(HeapState state) {
     356      251373 :   gc_state_ = state;
     357           0 : }
     358             : 
     359          35 : void Heap::PrintShortHeapStatistics() {
     360          35 :   if (!FLAG_trace_gc_verbose) return;
     361           0 :   PrintIsolate(isolate_,
     362             :                "Memory allocator,       used: %6" PRIuS
     363             :                " KB,"
     364             :                " available: %6" PRIuS " KB\n",
     365             :                memory_allocator()->Size() / KB,
     366           0 :                memory_allocator()->Available() / KB);
     367           0 :   PrintIsolate(isolate_,
     368             :                "Read-only space,        used: %6" PRIuS
     369             :                " KB"
     370             :                ", available: %6" PRIuS
     371             :                " KB"
     372             :                ", committed: %6" PRIuS " KB\n",
     373           0 :                read_only_space_->Size() / KB,
     374           0 :                read_only_space_->Available() / KB,
     375           0 :                read_only_space_->CommittedMemory() / KB);
     376           0 :   PrintIsolate(isolate_,
     377             :                "New space,              used: %6" PRIuS
     378             :                " KB"
     379             :                ", available: %6" PRIuS
     380             :                " KB"
     381             :                ", committed: %6" PRIuS " KB\n",
     382           0 :                new_space_->Size() / KB, new_space_->Available() / KB,
     383           0 :                new_space_->CommittedMemory() / KB);
     384           0 :   PrintIsolate(isolate_,
     385             :                "New large object space, used: %6" PRIuS
     386             :                " KB"
     387             :                ", available: %6" PRIuS
     388             :                " KB"
     389             :                ", committed: %6" PRIuS " KB\n",
     390           0 :                new_lo_space_->SizeOfObjects() / KB,
     391           0 :                new_lo_space_->Available() / KB,
     392           0 :                new_lo_space_->CommittedMemory() / KB);
     393           0 :   PrintIsolate(isolate_,
     394             :                "Old space,              used: %6" PRIuS
     395             :                " KB"
     396             :                ", available: %6" PRIuS
     397             :                " KB"
     398             :                ", committed: %6" PRIuS " KB\n",
     399           0 :                old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
     400           0 :                old_space_->CommittedMemory() / KB);
     401           0 :   PrintIsolate(isolate_,
     402             :                "Code space,             used: %6" PRIuS
     403             :                " KB"
     404             :                ", available: %6" PRIuS
     405             :                " KB"
      406             :                ", committed: %6" PRIuS " KB\n",
     407           0 :                code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
     408           0 :                code_space_->CommittedMemory() / KB);
     409           0 :   PrintIsolate(isolate_,
     410             :                "Map space,              used: %6" PRIuS
     411             :                " KB"
     412             :                ", available: %6" PRIuS
     413             :                " KB"
     414             :                ", committed: %6" PRIuS " KB\n",
     415           0 :                map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
     416           0 :                map_space_->CommittedMemory() / KB);
     417           0 :   PrintIsolate(isolate_,
     418             :                "Large object space,     used: %6" PRIuS
     419             :                " KB"
     420             :                ", available: %6" PRIuS
     421             :                " KB"
     422             :                ", committed: %6" PRIuS " KB\n",
     423           0 :                lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
     424           0 :                lo_space_->CommittedMemory() / KB);
     425           0 :   PrintIsolate(isolate_,
     426             :                "Code large object space,     used: %6" PRIuS
     427             :                " KB"
     428             :                ", available: %6" PRIuS
     429             :                " KB"
     430             :                ", committed: %6" PRIuS " KB\n",
      431           0 :                code_lo_space_->SizeOfObjects() / KB,
     432           0 :                code_lo_space_->Available() / KB,
     433           0 :                code_lo_space_->CommittedMemory() / KB);
     434           0 :   PrintIsolate(isolate_,
     435             :                "All spaces,             used: %6" PRIuS
     436             :                " KB"
     437             :                ", available: %6" PRIuS
     438             :                " KB"
      439             :                ", committed: %6" PRIuS " KB\n",
     440           0 :                this->SizeOfObjects() / KB, this->Available() / KB,
     441           0 :                this->CommittedMemory() / KB);
     442           0 :   PrintIsolate(isolate_,
     443             :                "Unmapper buffering %zu chunks of committed: %6" PRIuS " KB\n",
     444             :                memory_allocator()->unmapper()->NumberOfCommittedChunks(),
     445           0 :                CommittedMemoryOfUnmapper() / KB);
     446           0 :   PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
     447           0 :                isolate()->isolate_data()->external_memory_ / KB);
     448           0 :   PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
     449           0 :                backing_store_bytes_ / KB);
     450           0 :   PrintIsolate(isolate_, "External memory global %zu KB\n",
     451           0 :                external_memory_callback_() / KB);
     452           0 :   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
     453           0 :                total_gc_time_ms_);
     454             : }
     455             : 
     456           0 : void Heap::ReportStatisticsAfterGC() {
     457           0 :   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
     458             :        ++i) {
     459           0 :     int count = deferred_counters_[i];
     460           0 :     deferred_counters_[i] = 0;
     461           0 :     while (count > 0) {
     462           0 :       count--;
     463           0 :       isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
     464             :     }
     465             :   }
     466           0 : }
     467             : 
     468        8174 : void Heap::AddHeapObjectAllocationTracker(
     469             :     HeapObjectAllocationTracker* tracker) {
     470        8174 :   if (allocation_trackers_.empty()) DisableInlineAllocation();
     471        8174 :   allocation_trackers_.push_back(tracker);
     472        8174 : }
     473             : 
     474        8170 : void Heap::RemoveHeapObjectAllocationTracker(
     475             :     HeapObjectAllocationTracker* tracker) {
     476             :   allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
     477             :                                          allocation_trackers_.end(), tracker),
     478        8170 :                              allocation_trackers_.end());
     479        8170 :   if (allocation_trackers_.empty()) EnableInlineAllocation();
     480        8170 : }
     481             : 
     482           0 : void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
     483             :                                   RetainingPathOption option) {
     484           0 :   if (!FLAG_track_retaining_path) {
     485           0 :     PrintF("Retaining path tracking requires --track-retaining-path\n");
     486             :   } else {
     487             :     Handle<WeakArrayList> array(retaining_path_targets(), isolate());
     488           0 :     int index = array->length();
     489             :     array = WeakArrayList::AddToEnd(isolate(), array,
     490           0 :                                     MaybeObjectHandle::Weak(object));
     491             :     set_retaining_path_targets(*array);
     492             :     DCHECK_EQ(array->length(), index + 1);
     493           0 :     retaining_path_target_option_[index] = option;
     494             :   }
     495           0 : }
     496             : 
     497           0 : bool Heap::IsRetainingPathTarget(HeapObject object,
     498             :                                  RetainingPathOption* option) {
     499             :   WeakArrayList targets = retaining_path_targets();
     500             :   int length = targets->length();
     501             :   MaybeObject object_to_check = HeapObjectReference::Weak(object);
     502           0 :   for (int i = 0; i < length; i++) {
     503             :     MaybeObject target = targets->Get(i);
     504             :     DCHECK(target->IsWeakOrCleared());
     505           0 :     if (target == object_to_check) {
     506             :       DCHECK(retaining_path_target_option_.count(i));
     507           0 :       *option = retaining_path_target_option_[i];
     508             :       return true;
     509             :     }
     510             :   }
     511           0 :   return false;
     512             : }
     513             : 
     514           0 : void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
     515           0 :   PrintF("\n\n\n");
     516           0 :   PrintF("#################################################\n");
     517           0 :   PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target->ptr()));
     518           0 :   HeapObject object = target;
     519             :   std::vector<std::pair<HeapObject, bool>> retaining_path;
     520             :   Root root = Root::kUnknown;
     521             :   bool ephemeron = false;
     522             :   while (true) {
     523           0 :     retaining_path.push_back(std::make_pair(object, ephemeron));
     524           0 :     if (option == RetainingPathOption::kTrackEphemeronPath &&
     525             :         ephemeron_retainer_.count(object)) {
     526           0 :       object = ephemeron_retainer_[object];
     527             :       ephemeron = true;
     528           0 :     } else if (retainer_.count(object)) {
     529           0 :       object = retainer_[object];
     530             :       ephemeron = false;
     531             :     } else {
     532           0 :       if (retaining_root_.count(object)) {
     533           0 :         root = retaining_root_[object];
     534             :       }
     535             :       break;
     536             :     }
     537             :   }
     538           0 :   int distance = static_cast<int>(retaining_path.size());
     539           0 :   for (auto node : retaining_path) {
     540           0 :     HeapObject object = node.first;
     541           0 :     bool ephemeron = node.second;
     542           0 :     PrintF("\n");
     543           0 :     PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
     544           0 :     PrintF("Distance from root %d%s: ", distance,
     545           0 :            ephemeron ? " (ephemeron)" : "");
     546           0 :     object->ShortPrint();
     547           0 :     PrintF("\n");
     548             : #ifdef OBJECT_PRINT
     549             :     object->Print();
     550             :     PrintF("\n");
     551             : #endif
     552           0 :     --distance;
     553             :   }
     554           0 :   PrintF("\n");
     555           0 :   PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
     556           0 :   PrintF("Root: %s\n", RootVisitor::RootName(root));
     557           0 :   PrintF("-------------------------------------------------\n");
     558           0 : }
     559             : 
     560           0 : void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
     561           0 :   if (retainer_.count(object)) return;
     562           0 :   retainer_[object] = retainer;
     563           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     564           0 :   if (IsRetainingPathTarget(object, &option)) {
     565             :     // Check if the retaining path was already printed in
     566             :     // AddEphemeronRetainer().
     567           0 :     if (ephemeron_retainer_.count(object) == 0 ||
     568           0 :         option == RetainingPathOption::kDefault) {
     569           0 :       PrintRetainingPath(object, option);
     570             :     }
     571             :   }
     572             : }
     573             : 
     574           0 : void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
     575           0 :   if (ephemeron_retainer_.count(object)) return;
     576           0 :   ephemeron_retainer_[object] = retainer;
     577           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     578           0 :   if (IsRetainingPathTarget(object, &option) &&
     579           0 :       option == RetainingPathOption::kTrackEphemeronPath) {
     580             :     // Check if the retaining path was already printed in AddRetainer().
     581           0 :     if (retainer_.count(object) == 0) {
     582           0 :       PrintRetainingPath(object, option);
     583             :     }
     584             :   }
     585             : }
     586             : 
     587           0 : void Heap::AddRetainingRoot(Root root, HeapObject object) {
     588           0 :   if (retaining_root_.count(object)) return;
     589           0 :   retaining_root_[object] = root;
     590           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     591           0 :   if (IsRetainingPathTarget(object, &option)) {
     592           0 :     PrintRetainingPath(object, option);
     593             :   }
     594             : }
     595             : 
     596           0 : void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
     597           0 :   deferred_counters_[feature]++;
     598           0 : }
     599             : 
     600       26293 : bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
     601             : 
     602       94928 : void Heap::GarbageCollectionPrologue() {
     603      379712 :   TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
     604             :   {
     605             :     AllowHeapAllocation for_the_first_part_of_prologue;
     606       94928 :     gc_count_++;
     607             : 
     608             : #ifdef VERIFY_HEAP
     609             :     if (FLAG_verify_heap) {
     610             :       Verify();
     611             :     }
     612             : #endif
     613             :   }
     614             : 
     615             :   // Reset GC statistics.
     616       94928 :   promoted_objects_size_ = 0;
     617       94928 :   previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
     618       94928 :   semi_space_copied_object_size_ = 0;
     619       94928 :   nodes_died_in_new_space_ = 0;
     620       94928 :   nodes_copied_in_new_space_ = 0;
     621       94928 :   nodes_promoted_ = 0;
     622             : 
     623             :   UpdateMaximumCommitted();
     624             : 
     625             : #ifdef DEBUG
     626             :   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
     627             : 
     628             :   if (FLAG_gc_verbose) Print();
     629             : #endif  // DEBUG
     630             : 
     631       94928 :   if (new_space_->IsAtMaximumCapacity()) {
     632        2008 :     maximum_size_scavenges_++;
     633             :   } else {
     634       92920 :     maximum_size_scavenges_ = 0;
     635             :   }
     636       94928 :   CheckNewSpaceExpansionCriteria();
     637             :   UpdateNewSpaceAllocationCounter();
     638       94928 :   if (FLAG_track_retaining_path) {
     639             :     retainer_.clear();
     640             :     ephemeron_retainer_.clear();
     641             :     retaining_root_.clear();
     642             :   }
     643       94928 :   memory_allocator()->unmapper()->PrepareForGC();
     644       94928 : }
     645             : 
     646      190702 : size_t Heap::SizeOfObjects() {
     647             :   size_t total = 0;
     648             : 
     649     7208261 :   for (SpaceIterator it(this); it.has_next();) {
     650     5051240 :     total += it.next()->SizeOfObjects();
     651             :   }
     652      190702 :   return total;
     653             : }
     654             : 
     655             : 
     656          40 : const char* Heap::GetSpaceName(int idx) {
     657          40 :   switch (idx) {
     658             :     case NEW_SPACE:
     659             :       return "new_space";
     660             :     case OLD_SPACE:
     661           5 :       return "old_space";
     662             :     case MAP_SPACE:
     663           5 :       return "map_space";
     664             :     case CODE_SPACE:
     665           5 :       return "code_space";
     666             :     case LO_SPACE:
     667           5 :       return "large_object_space";
     668             :     case NEW_LO_SPACE:
     669           5 :       return "new_large_object_space";
     670             :     case CODE_LO_SPACE:
     671           5 :       return "code_large_object_space";
     672             :     case RO_SPACE:
     673           5 :       return "read_only_space";
     674             :     default:
     675           0 :       UNREACHABLE();
     676             :   }
     677             :   return nullptr;
     678             : }
     679             : 
     680      111048 : void Heap::MergeAllocationSitePretenuringFeedback(
     681             :     const PretenuringFeedbackMap& local_pretenuring_feedback) {
     682             :   AllocationSite site;
     683      184150 :   for (auto& site_and_count : local_pretenuring_feedback) {
     684       73102 :     site = site_and_count.first;
     685             :     MapWord map_word = site_and_count.first->map_word();
     686       73102 :     if (map_word.IsForwardingAddress()) {
     687             :       site = AllocationSite::cast(map_word.ToForwardingAddress());
     688             :     }
     689             : 
     690             :     // We have not validated the allocation site yet, since we have not
      691             :     // dereferenced the site while collecting the information.
     692             :     // This is an inlined check of AllocationMemento::IsValid.
     693      146204 :     if (!site->IsAllocationSite() || site->IsZombie()) continue;
     694             : 
     695       73044 :     const int value = static_cast<int>(site_and_count.second);
     696             :     DCHECK_LT(0, value);
     697       73044 :     if (site->IncrementMementoFoundCount(value)) {
     698             :       // For sites in the global map the count is accessed through the site.
     699        2886 :       global_pretenuring_feedback_.insert(std::make_pair(site, 0));
     700             :     }
     701             :   }
     702      111048 : }
     703             : 
     704       28902 : void Heap::AddAllocationObserversToAllSpaces(
     705             :     AllocationObserver* observer, AllocationObserver* new_space_observer) {
     706             :   DCHECK(observer && new_space_observer);
     707             : 
     708      260118 :   for (SpaceIterator it(this); it.has_next();) {
     709             :     Space* space = it.next();
     710      231216 :     if (space == new_space()) {
     711       28902 :       space->AddAllocationObserver(new_space_observer);
     712             :     } else {
     713      202314 :       space->AddAllocationObserver(observer);
     714             :     }
     715             :   }
     716       28902 : }
     717             : 
     718          59 : void Heap::RemoveAllocationObserversFromAllSpaces(
     719             :     AllocationObserver* observer, AllocationObserver* new_space_observer) {
     720             :   DCHECK(observer && new_space_observer);
     721             : 
     722         531 :   for (SpaceIterator it(this); it.has_next();) {
     723             :     Space* space = it.next();
     724         472 :     if (space == new_space()) {
     725          59 :       space->RemoveAllocationObserver(new_space_observer);
     726             :     } else {
     727         413 :       space->RemoveAllocationObserver(observer);
     728             :     }
     729             :   }
     730          59 : }
     731             : 
     732             : class Heap::SkipStoreBufferScope {
     733             :  public:
     734             :   explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
     735             :       : store_buffer_(store_buffer) {
     736       94928 :     store_buffer_->MoveAllEntriesToRememberedSet();
     737       94928 :     store_buffer_->SetMode(StoreBuffer::IN_GC);
     738             :   }
     739             : 
     740             :   ~SkipStoreBufferScope() {
     741             :     DCHECK(store_buffer_->Empty());
     742       94928 :     store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
     743             :   }
     744             : 
     745             :  private:
     746             :   StoreBuffer* store_buffer_;
     747             : };
     748             : 
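
SkipStoreBufferScope above is a small RAII helper: its constructor flushes any
buffered entries into the remembered set and switches the store buffer into
IN_GC mode, and its destructor switches it back to NOT_IN_GC. A hedged sketch
of how such a scope is used from inside a Heap member function (the
surrounding code here is hypothetical, not the actual call site):

    {
      // While the scope is alive, new entries bypass the store buffer.
      SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
      // ... collection work that must observe a drained store buffer ...
    }  // destructor restores StoreBuffer::NOT_IN_GC
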
     749             : namespace {
     750        1326 : inline bool MakePretenureDecision(
     751             :     AllocationSite site, AllocationSite::PretenureDecision current_decision,
     752             :     double ratio, bool maximum_size_scavenge) {
     753             :   // Here we just allow state transitions from undecided or maybe tenure
     754             :   // to don't tenure, maybe tenure, or tenure.
     755        2652 :   if ((current_decision == AllocationSite::kUndecided ||
     756        1326 :        current_decision == AllocationSite::kMaybeTenure)) {
     757         806 :     if (ratio >= AllocationSite::kPretenureRatio) {
     758             :       // We just transition into tenure state when the semi-space was at
     759             :       // maximum capacity.
     760         632 :       if (maximum_size_scavenge) {
     761             :         site->set_deopt_dependent_code(true);
     762             :         site->set_pretenure_decision(AllocationSite::kTenure);
     763             :         // Currently we just need to deopt when we make a state transition to
     764             :         // tenure.
     765          44 :         return true;
     766             :       }
     767             :       site->set_pretenure_decision(AllocationSite::kMaybeTenure);
     768             :     } else {
     769             :       site->set_pretenure_decision(AllocationSite::kDontTenure);
     770             :     }
     771             :   }
     772             :   return false;
     773             : }
     774             : 
     775        1326 : inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
     776             :                                       bool maximum_size_scavenge) {
     777             :   bool deopt = false;
     778             :   int create_count = site->memento_create_count();
     779             :   int found_count = site->memento_found_count();
     780             :   bool minimum_mementos_created =
     781             :       create_count >= AllocationSite::kPretenureMinimumCreated;
     782           0 :   double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
     783        1326 :                      ? static_cast<double>(found_count) / create_count
     784        2652 :                      : 0.0;
     785             :   AllocationSite::PretenureDecision current_decision =
     786             :       site->pretenure_decision();
     787             : 
     788        1326 :   if (minimum_mementos_created) {
     789        1326 :     deopt = MakePretenureDecision(site, current_decision, ratio,
     790        1326 :                                   maximum_size_scavenge);
     791             :   }
     792             : 
     793        1326 :   if (FLAG_trace_pretenuring_statistics) {
     794           0 :     PrintIsolate(isolate,
     795             :                  "pretenuring: AllocationSite(%p): (created, found, ratio) "
     796             :                  "(%d, %d, %f) %s => %s\n",
     797             :                  reinterpret_cast<void*>(site.ptr()), create_count, found_count,
     798             :                  ratio, site->PretenureDecisionName(current_decision),
     799           0 :                  site->PretenureDecisionName(site->pretenure_decision()));
     800             :   }
     801             : 
     802             :   // Clear feedback calculation fields until the next gc.
     803             :   site->set_memento_found_count(0);
     804             :   site->set_memento_create_count(0);
     805        1326 :   return deopt;
     806             : }
     807             : }  // namespace
     808             : 
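
A concrete walk-through of the two helpers above (numbers hypothetical): a
site with memento_create_count of 100 and memento_found_count of 90 has a
ratio of 0.9. If that ratio reaches AllocationSite::kPretenureRatio and the
preceding scavenge ran with the semi-space at maximum capacity, the site is
moved to kTenure and its dependent code is flagged for deoptimization; with
the same ratio but no maximum-size scavenge it only advances to kMaybeTenure,
and a ratio below the threshold sets it to kDontTenure.
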
     809           0 : void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
     810             :   global_pretenuring_feedback_.erase(site);
     811           0 : }
     812             : 
     813           0 : bool Heap::DeoptMaybeTenuredAllocationSites() {
     814       94928 :   return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
     815             : }
     816             : 
     817       94928 : void Heap::ProcessPretenuringFeedback() {
     818       94928 :   bool trigger_deoptimization = false;
     819       94928 :   if (FLAG_allocation_site_pretenuring) {
     820             :     int tenure_decisions = 0;
     821             :     int dont_tenure_decisions = 0;
     822             :     int allocation_mementos_found = 0;
     823       94928 :     int allocation_sites = 0;
     824             :     int active_allocation_sites = 0;
     825             : 
     826       94928 :     AllocationSite site;
     827             : 
     828             :     // Step 1: Digest feedback for recorded allocation sites.
     829             :     bool maximum_size_scavenge = MaximumSizeScavenge();
     830       96254 :     for (auto& site_and_count : global_pretenuring_feedback_) {
     831        1326 :       allocation_sites++;
     832        1326 :       site = site_and_count.first;
     833             :       // Count is always access through the site.
     834             :       DCHECK_EQ(0, site_and_count.second);
     835             :       int found_count = site->memento_found_count();
     836             :       // An entry in the storage does not imply that the count is > 0 because
     837             :       // allocation sites might have been reset due to too many objects dying
     838             :       // in old space.
     839        1326 :       if (found_count > 0) {
     840             :         DCHECK(site->IsAllocationSite());
     841        1326 :         active_allocation_sites++;
     842        1326 :         allocation_mementos_found += found_count;
     843        1326 :         if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
     844          44 :           trigger_deoptimization = true;
     845             :         }
     846        1326 :         if (site->GetAllocationType() == AllocationType::kOld) {
     847          50 :           tenure_decisions++;
     848             :         } else {
     849        1276 :           dont_tenure_decisions++;
     850             :         }
     851             :       }
     852             :     }
     853             : 
     854             :     // Step 2: Deopt maybe tenured allocation sites if necessary.
     855             :     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
     856       94928 :     if (deopt_maybe_tenured) {
     857          76 :       ForeachAllocationSite(
     858             :           allocation_sites_list(),
     859         413 :           [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
     860             :             DCHECK(site->IsAllocationSite());
     861         412 :             allocation_sites++;
     862         412 :             if (site->IsMaybeTenure()) {
     863             :               site->set_deopt_dependent_code(true);
     864           1 :               trigger_deoptimization = true;
     865             :             }
     866          76 :           });
     867             :     }
     868             : 
     869       94928 :     if (trigger_deoptimization) {
     870          24 :       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
     871             :     }
     872             : 
     873       94928 :     if (FLAG_trace_pretenuring_statistics &&
     874           0 :         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
     875             :          dont_tenure_decisions > 0)) {
     876           0 :       PrintIsolate(isolate(),
     877             :                    "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
     878             :                    "active_sites=%d "
     879             :                    "mementos=%d tenured=%d not_tenured=%d\n",
     880             :                    deopt_maybe_tenured ? 1 : 0, allocation_sites,
     881             :                    active_allocation_sites, allocation_mementos_found,
     882           0 :                    tenure_decisions, dont_tenure_decisions);
     883             :     }
     884             : 
     885             :     global_pretenuring_feedback_.clear();
     886             :     global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
     887             :   }
     888       94928 : }
     889             : 
     890      274410 : void Heap::InvalidateCodeDeoptimizationData(Code code) {
     891             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(code);
     892      274410 :   CodePageMemoryModificationScope modification_scope(chunk);
     893      274410 :   code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
     894      274410 : }
     895             : 
     896          24 : void Heap::DeoptMarkedAllocationSites() {
     897             :   // TODO(hpayer): If iterating over the allocation sites list becomes a
     898             :   // performance issue, use a cache data structure in heap instead.
     899             : 
     900         150 :   ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) {
     901          81 :     if (site->deopt_dependent_code()) {
     902         135 :       site->dependent_code()->MarkCodeForDeoptimization(
     903          45 :           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
     904             :       site->set_deopt_dependent_code(false);
     905             :     }
     906         105 :   });
     907             : 
     908          24 :   Deoptimizer::DeoptimizeMarkedCode(isolate_);
     909          24 : }
     910             : 
     911             : 
     912       94928 : void Heap::GarbageCollectionEpilogue() {
     913      379712 :   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
     914       94928 :   if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
     915           0 :     ZapFromSpace();
     916             :   }
     917             : 
     918             : #ifdef VERIFY_HEAP
     919             :   if (FLAG_verify_heap) {
     920             :     Verify();
     921             :   }
     922             : #endif
     923             : 
     924             :   AllowHeapAllocation for_the_rest_of_the_epilogue;
     925             : 
     926             : #ifdef DEBUG
     927             :   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
     928             :   if (FLAG_print_handles) PrintHandles();
     929             :   if (FLAG_gc_verbose) Print();
     930             :   if (FLAG_code_stats) ReportCodeStatistics("After GC");
     931             :   if (FLAG_check_handle_count) CheckHandleCount();
     932             : #endif
     933             : 
     934             :   UpdateMaximumCommitted();
     935             : 
     936       94928 :   isolate_->counters()->alive_after_last_gc()->Set(
     937             :       static_cast<int>(SizeOfObjects()));
     938             : 
     939       94928 :   isolate_->counters()->string_table_capacity()->Set(
     940             :       string_table()->Capacity());
     941       94928 :   isolate_->counters()->number_of_symbols()->Set(
     942             :       string_table()->NumberOfElements());
     943             : 
     944       94928 :   if (CommittedMemory() > 0) {
     945       94928 :     isolate_->counters()->external_fragmentation_total()->AddSample(
     946      189856 :         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
     947             : 
     948       94928 :     isolate_->counters()->heap_sample_total_committed()->AddSample(
     949      189856 :         static_cast<int>(CommittedMemory() / KB));
     950       94928 :     isolate_->counters()->heap_sample_total_used()->AddSample(
     951      189856 :         static_cast<int>(SizeOfObjects() / KB));
     952       94928 :     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
     953      189856 :         static_cast<int>(map_space()->CommittedMemory() / KB));
     954       94928 :     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
     955      189856 :         static_cast<int>(code_space()->CommittedMemory() / KB));
     956             : 
     957       94928 :     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
     958      189856 :         static_cast<int>(MaximumCommittedMemory() / KB));
     959             :   }
     960             : 
     961             : #define UPDATE_COUNTERS_FOR_SPACE(space)                \
     962             :   isolate_->counters()->space##_bytes_available()->Set( \
     963             :       static_cast<int>(space()->Available()));          \
     964             :   isolate_->counters()->space##_bytes_committed()->Set( \
     965             :       static_cast<int>(space()->CommittedMemory()));    \
     966             :   isolate_->counters()->space##_bytes_used()->Set(      \
     967             :       static_cast<int>(space()->SizeOfObjects()));
     968             : #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
     969             :   if (space()->CommittedMemory() > 0) {                                \
     970             :     isolate_->counters()->external_fragmentation_##space()->AddSample( \
     971             :         static_cast<int>(100 -                                         \
     972             :                          (space()->SizeOfObjects() * 100.0) /          \
     973             :                              space()->CommittedMemory()));             \
     974             :   }
     975             : #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
     976             :   UPDATE_COUNTERS_FOR_SPACE(space)                         \
     977             :   UPDATE_FRAGMENTATION_FOR_SPACE(space)
     978             : 
     979      284784 :   UPDATE_COUNTERS_FOR_SPACE(new_space)
     980      474640 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
     981      474640 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
     982      474640 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
     983      398190 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
     984             : #undef UPDATE_COUNTERS_FOR_SPACE
     985             : #undef UPDATE_FRAGMENTATION_FOR_SPACE
     986             : #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
     987             : 
     988             : #ifdef DEBUG
     989             :   ReportStatisticsAfterGC();
     990             : #endif  // DEBUG
     991             : 
     992       94928 :   last_gc_time_ = MonotonicallyIncreasingTimeInMs();
     993             : 
     994             :   {
     995      379712 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
     996       94928 :     ReduceNewSpaceSize();
     997             :   }
     998             : 
     999       94928 :   if (FLAG_harmony_weak_refs) {
    1000             :     // TODO(marja): (spec): The exact condition on when to schedule the cleanup
    1001             :     // task is unclear. This version schedules the cleanup task for a
    1002             :     // JSFinalizationGroup whenever the GC has discovered new dirty WeakCells
    1003             :     // for it (at that point it might have leftover dirty WeakCells since an
    1004             :     // earlier invocation of the cleanup function didn't iterate through
    1005             :     // them). See https://github.com/tc39/proposal-weakrefs/issues/34
    1006             :     HandleScope handle_scope(isolate());
    1007         821 :     while (!isolate()->heap()->dirty_js_finalization_groups()->IsUndefined(
    1008             :         isolate())) {
    1009             :       // Enqueue one microtask per JSFinalizationGroup.
    1010             :       Handle<JSFinalizationGroup> finalization_group(
    1011             :           JSFinalizationGroup::cast(
    1012             :               isolate()->heap()->dirty_js_finalization_groups()),
    1013             :           isolate());
    1014             :       isolate()->heap()->set_dirty_js_finalization_groups(
    1015             :           finalization_group->next());
    1016         416 :       finalization_group->set_next(ReadOnlyRoots(isolate()).undefined_value());
    1017             :       Handle<NativeContext> context(finalization_group->native_context(),
    1018             :                                     isolate());
    1019             :       // GC has no native context, but we use the creation context of the
    1020             :       // JSFinalizationGroup for the EnqueueTask operation. This is consistent
    1021             :       // with the Promise implementation, assuming the JSFinalizationGroup's
    1022             :       // creation context is the "caller's context" in promise functions. An
    1023             :       // alternative would be to use the native context of the cleanup
    1024             :       // function. This difference shouldn't be observable from JavaScript,
    1025             :       // since we enter the native context of the cleanup function before
    1026             :       // calling it. TODO(marja): Revisit when the spec clarifies this. See also
    1027             :       // https://github.com/tc39/proposal-weakrefs/issues/38 .
    1028             :       Handle<FinalizationGroupCleanupJobTask> task =
    1029             :           isolate()->factory()->NewFinalizationGroupCleanupJobTask(
    1030         208 :               finalization_group);
    1031         416 :       context->microtask_queue()->EnqueueMicrotask(*task);
    1032             :     }
    1033             :   }
    1034       94928 : }
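
Note on the loop just above: it drains an intrusive singly-linked list of dirty JSFinalizationGroups, popping the head, detaching it, and enqueueing exactly one cleanup microtask per group. A simplified standalone sketch of that drain pattern (Node, DrainDirtyList, and the task queue are illustrative stand-ins, not V8 types):

    #include <functional>
    #include <queue>

    struct Node {            // stand-in for a dirty JSFinalizationGroup
      Node* next = nullptr;  // intrusive "next dirty group" link
    };

    void DrainDirtyList(Node*& head,
                        std::queue<std::function<void()>>& microtasks) {
      while (head != nullptr) {
        Node* current = head;     // pop the current head ...
        head = current->next;     // ... advance to the rest of the list ...
        current->next = nullptr;  // ... and detach the popped element,
        // then enqueue exactly one cleanup task for it.
        microtasks.push([current]() { /* cleanup for `current` runs later */ });
      }
    }
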
    1035             : 
    1036             : class GCCallbacksScope {
    1037             :  public:
    1038             :   explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    1039      228082 :     heap_->gc_callbacks_depth_++;
    1040             :   }
    1041      228082 :   ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
    1042             : 
    1043       94928 :   bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
    1044             : 
    1045             :  private:
    1046             :   Heap* heap_;
    1047             : };
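
GCCallbacksScope above is a small RAII depth counter: prologue/epilogue callbacks fire only when CheckReenter() reports depth 1, so callbacks that themselves trigger nested GCs are not invoked again recursively. A minimal sketch of the same pattern (names are illustrative):

    struct DepthGuard {
      explicit DepthGuard(int* depth) : depth_(depth) { ++*depth_; }
      ~DepthGuard() { --*depth_; }
      // Mirrors CheckReenter(): true only in the outermost scope.
      bool IsOutermost() const { return *depth_ == 1; }

     private:
      int* depth_;
    };

    void RunGcCallbacks(int* callbacks_depth) {
      DepthGuard guard(callbacks_depth);
      if (guard.IsOutermost()) {
        // Invoke user callbacks here; a nested GC started from a callback
        // re-enters this function with depth > 1 and skips this branch.
      }
    }
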
    1048             : 
    1049             : 
    1050       12026 : void Heap::HandleGCRequest() {
    1051       12026 :   if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
    1052             :     CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
    1053           0 :     stress_scavenge_observer_->RequestedGCDone();
    1054       12026 :   } else if (HighMemoryPressure()) {
    1055             :     incremental_marking()->reset_request_type();
    1056           5 :     CheckMemoryPressure();
    1057       12021 :   } else if (incremental_marking()->request_type() ==
    1058             :              IncrementalMarking::COMPLETE_MARKING) {
    1059             :     incremental_marking()->reset_request_type();
    1060        5075 :     CollectAllGarbage(current_gc_flags_,
    1061             :                       GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
    1062             :                       current_gc_callback_flags_);
    1063        6946 :   } else if (incremental_marking()->request_type() ==
    1064        6946 :                  IncrementalMarking::FINALIZATION &&
    1065       13892 :              incremental_marking()->IsMarking() &&
    1066             :              !incremental_marking()->finalize_marking_completed()) {
    1067             :     incremental_marking()->reset_request_type();
    1068             :     FinalizeIncrementalMarkingIncrementally(
    1069        6945 :         GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
    1070             :   }
    1071       12026 : }
    1072             : 
    1073             : 
    1074           0 : void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
    1075             :   DCHECK(FLAG_idle_time_scavenge);
    1076             :   DCHECK_NOT_NULL(scavenge_job_);
    1077       23459 :   scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
    1078           0 : }
    1079             : 
    1080       94928 : TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
    1081       94928 :   if (IsYoungGenerationCollector(collector)) {
    1082       20973 :     if (isolate_->IsIsolateInBackground()) {
    1083           0 :       return isolate_->counters()->gc_scavenger_background();
    1084             :     }
    1085       20973 :     return isolate_->counters()->gc_scavenger_foreground();
    1086             :   } else {
    1087       73955 :     if (!incremental_marking()->IsStopped()) {
    1088       24809 :       if (ShouldReduceMemory()) {
    1089        2378 :         if (isolate_->IsIsolateInBackground()) {
    1090           0 :           return isolate_->counters()->gc_finalize_reduce_memory_background();
    1091             :         }
    1092        2378 :         return isolate_->counters()->gc_finalize_reduce_memory_foreground();
    1093             :       } else {
    1094       22431 :         if (isolate_->IsIsolateInBackground()) {
    1095           0 :           return isolate_->counters()->gc_finalize_background();
    1096             :         }
    1097       22431 :         return isolate_->counters()->gc_finalize_foreground();
    1098             :       }
    1099             :     } else {
    1100       49146 :       if (isolate_->IsIsolateInBackground()) {
    1101           0 :         return isolate_->counters()->gc_compactor_background();
    1102             :       }
    1103       49146 :       return isolate_->counters()->gc_compactor_foreground();
    1104             :     }
    1105             :   }
    1106             : }
    1107             : 
    1108           0 : TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
    1109       94928 :   if (IsYoungGenerationCollector(collector)) {
    1110       20973 :     return isolate_->counters()->gc_scavenger();
    1111             :   } else {
    1112       73955 :     if (!incremental_marking()->IsStopped()) {
    1113       24809 :       if (ShouldReduceMemory()) {
    1114        2378 :         return isolate_->counters()->gc_finalize_reduce_memory();
    1115             :       } else {
    1116       22431 :         return isolate_->counters()->gc_finalize();
    1117             :       }
    1118             :     } else {
    1119       49146 :       return isolate_->counters()->gc_compactor();
    1120             :     }
    1121             :   }
    1122             : }
    1123             : 
    1124        4093 : void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
    1125             :                              const v8::GCCallbackFlags gc_callback_flags) {
    1126             :   // Since we are ignoring the return value, the exact choice of space does
    1127             :   // not matter, so long as we do not specify NEW_SPACE, which would not
    1128             :   // cause a full GC.
    1129             :   set_current_gc_flags(flags);
    1130       70338 :   CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
    1131             :   set_current_gc_flags(kNoGCFlags);
    1132        4093 : }
    1133             : 
    1134             : namespace {
    1135             : 
    1136             : intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
    1137           0 :   int slots = size / kTaggedSize;
    1138             :   DCHECK_EQ(a->Size(), size);
    1139             :   DCHECK_EQ(b->Size(), size);
    1140           0 :   Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a->address());
    1141           0 :   Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b->address());
    1142           0 :   for (int i = 0; i < slots; i++) {
    1143           0 :     if (*slot_a != *slot_b) {
    1144           0 :       return *slot_a - *slot_b;
    1145             :     }
    1146           0 :     slot_a++;
    1147           0 :     slot_b++;
    1148             :   }
    1149             :   return 0;
    1150             : }
    1151             : 
    1152           0 : void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
    1153           0 :   if (objects.size() == 0) return;
    1154             : 
    1155           0 :   sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
    1156             :     intptr_t c = CompareWords(size, a, b);
    1157           0 :     if (c != 0) return c < 0;
    1158             :     return a < b;
    1159             :   });
    1160             : 
    1161             :   std::vector<std::pair<int, HeapObject>> duplicates;
    1162           0 :   HeapObject current = objects[0];
    1163             :   int count = 1;
    1164           0 :   for (size_t i = 1; i < objects.size(); i++) {
    1165           0 :     if (CompareWords(size, current, objects[i]) == 0) {
    1166           0 :       count++;
    1167             :     } else {
    1168           0 :       if (count > 1) {
    1169           0 :         duplicates.push_back(std::make_pair(count - 1, current));
    1170             :       }
    1171             :       count = 1;
    1172           0 :       current = objects[i];
    1173             :     }
    1174             :   }
    1175           0 :   if (count > 1) {
    1176           0 :     duplicates.push_back(std::make_pair(count - 1, current));
    1177             :   }
    1178             : 
    1179           0 :   int threshold = FLAG_trace_duplicate_threshold_kb * KB;
    1180             : 
    1181             :   sort(duplicates.begin(), duplicates.end());
    1182           0 :   for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
    1183           0 :     int duplicate_bytes = it->first * size;
    1184           0 :     if (duplicate_bytes < threshold) break;
    1185           0 :     PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
    1186           0 :            duplicate_bytes / KB);
    1187           0 :     PrintF("Sample object: ");
    1188           0 :     it->second->Print();
    1189           0 :     PrintF("============================\n");
    1190             :   }
    1191             : }
    1192             : }  // anonymous namespace
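
ReportDuplicates above finds duplicates by sorting the objects with a byte-wise comparison and then counting runs of equal neighbours, recording count - 1 extra copies per run. The same run-counting idea on plain integers (a standalone illustration, not V8 code):

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Returns (extra_copies, value) for every value occurring more than once.
    std::vector<std::pair<int, int>> CountDuplicates(std::vector<int> values) {
      std::vector<std::pair<int, int>> duplicates;
      if (values.empty()) return duplicates;
      std::sort(values.begin(), values.end());
      int current = values[0];
      int count = 1;
      for (size_t i = 1; i < values.size(); i++) {
        if (values[i] == current) {
          count++;
        } else {
          if (count > 1) duplicates.emplace_back(count - 1, current);
          count = 1;
          current = values[i];
        }
      }
      if (count > 1) duplicates.emplace_back(count - 1, current);
      return duplicates;
    }
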
    1193             : 
    1194        1256 : void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
    1195             :   // Since we are ignoring the return value, the exact choice of space does
    1196             :   // not matter, so long as we do not specify NEW_SPACE, which would not
    1197             :   // cause a full GC.
    1198             :   // Major GC would invoke weak handle callbacks on weakly reachable
    1199             :   // handles, but won't collect weakly reachable objects until next
    1200             :   // major GC.  Therefore if we collect aggressively and a weak handle callback
    1201             :   // has been invoked, we rerun major GC to release objects which become
    1202             :   // garbage.
    1203             :   // Note: as weak callbacks can execute arbitrary code, we cannot
    1204             :   // hope that eventually there will be no weak callback invocations.
    1205             :   // Therefore stop recollecting after several attempts.
    1206        1256 :   if (gc_reason == GarbageCollectionReason::kLastResort) {
    1207           5 :     InvokeNearHeapLimitCallback();
    1208             :   }
    1209             :   RuntimeCallTimerScope runtime_timer(
    1210        1256 :       isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
    1211             : 
    1212             :   // The optimizing compiler may be unnecessarily holding on to memory.
    1213        1256 :   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    1214        1256 :   isolate()->ClearSerializerData();
    1215             :   set_current_gc_flags(kReduceMemoryFootprintMask);
    1216        1256 :   isolate_->compilation_cache()->Clear();
    1217             :   const int kMaxNumberOfAttempts = 7;
    1218             :   const int kMinNumberOfAttempts = 2;
    1219             :   const v8::GCCallbackFlags callback_flags =
    1220             :       gc_reason == GarbageCollectionReason::kLowMemoryNotification
    1221             :           ? v8::kGCCallbackFlagForced
    1222        1256 :           : v8::kGCCallbackFlagCollectAllAvailableGarbage;
    1223        3854 :   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    1224        2555 :     if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
    1225             :         attempt + 1 >= kMinNumberOfAttempts) {
    1226             :       break;
    1227             :     }
    1228             :   }
    1229             : 
    1230             :   set_current_gc_flags(kNoGCFlags);
    1231        1256 :   new_space_->Shrink();
    1232        2512 :   new_lo_space_->SetCapacity(new_space_->Capacity());
    1233             :   UncommitFromSpace();
    1234        1256 :   EagerlyFreeExternalMemory();
    1235             : 
    1236        1256 :   if (FLAG_trace_duplicate_threshold_kb) {
    1237             :     std::map<int, std::vector<HeapObject>> objects_by_size;
    1238             :     PagedSpaces spaces(this);
    1239           0 :     for (PagedSpace* space = spaces.next(); space != nullptr;
    1240             :          space = spaces.next()) {
    1241           0 :       HeapObjectIterator it(space);
    1242           0 :       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    1243           0 :         objects_by_size[obj->Size()].push_back(obj);
    1244             :       }
    1245             :     }
    1246             :     {
    1247           0 :       LargeObjectIterator it(lo_space());
    1248           0 :       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    1249           0 :         objects_by_size[obj->Size()].push_back(obj);
    1250             :       }
    1251             :     }
    1252           0 :     for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
    1253             :          ++it) {
    1254           0 :       ReportDuplicates(it->first, it->second);
    1255             :     }
    1256             :   }
    1257        1256 : }
    1258             : 
    1259       33446 : void Heap::PreciseCollectAllGarbage(int flags,
    1260             :                                     GarbageCollectionReason gc_reason,
    1261             :                                     const GCCallbackFlags gc_callback_flags) {
    1262       33446 :   if (!incremental_marking()->IsStopped()) {
    1263             :     FinalizeIncrementalMarkingAtomically(gc_reason);
    1264             :   }
    1265             :   CollectAllGarbage(flags, gc_reason, gc_callback_flags);
    1266       33446 : }
    1267             : 
    1268      979796 : void Heap::ReportExternalMemoryPressure() {
    1269             :   const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
    1270             :       static_cast<GCCallbackFlags>(
    1271             :           kGCCallbackFlagSynchronousPhantomCallbackProcessing |
    1272             :           kGCCallbackFlagCollectAllExternalMemory);
    1273     1959592 :   if (isolate()->isolate_data()->external_memory_ >
    1274     1959592 :       (isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
    1275             :        external_memory_hard_limit())) {
    1276             :     CollectAllGarbage(
    1277             :         kReduceMemoryFootprintMask,
    1278             :         GarbageCollectionReason::kExternalMemoryPressure,
    1279             :         static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
    1280             :                                      kGCCallbackFlagsForExternalMemory));
    1281             :     return;
    1282             :   }
    1283      978960 :   if (incremental_marking()->IsStopped()) {
    1284        1297 :     if (incremental_marking()->CanBeActivated()) {
    1285             :       StartIncrementalMarking(GCFlagsForIncrementalMarking(),
    1286             :                               GarbageCollectionReason::kExternalMemoryPressure,
    1287             :                               kGCCallbackFlagsForExternalMemory);
    1288             :     } else {
    1289             :       CollectAllGarbage(i::Heap::kNoGCFlags,
    1290             :                         GarbageCollectionReason::kExternalMemoryPressure,
    1291             :                         kGCCallbackFlagsForExternalMemory);
    1292             :     }
    1293             :   } else {
    1294             :     // Incremental marking is turned on and has already been started.
    1295             :     const double kMinStepSize = 5;
    1296             :     const double kMaxStepSize = 10;
    1297      977663 :     const double ms_step = Min(
    1298             :         kMaxStepSize,
    1299             :         Max(kMinStepSize,
    1300     1955326 :             static_cast<double>(isolate()->isolate_data()->external_memory_) /
    1301      977663 :                 isolate()->isolate_data()->external_memory_limit_ *
    1302             :                 kMinStepSize));
    1303      977663 :     const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
    1304             :     // Extend the gc callback flags with external memory flags.
    1305             :     current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
    1306      977663 :         current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
    1307             :     incremental_marking()->AdvanceWithDeadline(
    1308      977663 :         deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
    1309             :   }
    1310             : }
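
The step size above grows linearly with the ratio of external memory to its limit and is clamped to the [kMinStepSize, kMaxStepSize] window of 5-10 ms. A standalone illustration with made-up figures:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double kMinStepSize = 5;   // ms
      const double kMaxStepSize = 10;  // ms
      // Hypothetical numbers: external memory is at 1.5x its limit.
      const double external_memory = 192.0 * 1024 * 1024;
      const double external_limit = 128.0 * 1024 * 1024;
      const double ms_step =
          std::min(kMaxStepSize,
                   std::max(kMinStepSize,
                            external_memory / external_limit * kMinStepSize));
      std::printf("marking step deadline: %.1f ms\n", ms_step);  // 1.5 * 5 = 7.5 ms
      return 0;
    }
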
    1311             : 
    1312       94928 : void Heap::EnsureFillerObjectAtTop() {
    1313             :   // There may be an allocation memento behind objects in new space. Upon
    1314             :   // evacuation of a non-full new space (or if we are on the last page) there
    1315             :   // may be uninitialized memory behind top. We fill the remainder of the page
    1316             :   // with a filler.
    1317       94928 :   Address to_top = new_space_->top();
    1318       94928 :   Page* page = Page::FromAddress(to_top - kTaggedSize);
    1319       94928 :   if (page->Contains(to_top)) {
    1320       93882 :     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
    1321       93882 :     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
    1322             :   }
    1323       94928 : }
    1324             : 
    1325       94928 : bool Heap::CollectGarbage(AllocationSpace space,
    1326             :                           GarbageCollectionReason gc_reason,
    1327             :                           const v8::GCCallbackFlags gc_callback_flags) {
    1328       94928 :   const char* collector_reason = nullptr;
    1329       94928 :   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
    1330       94928 :   is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
    1331             : 
    1332       94928 :   if (!CanExpandOldGeneration(new_space()->Capacity() +
    1333       94928 :                               new_lo_space()->Size())) {
    1334          64 :     InvokeNearHeapLimitCallback();
    1335             :   }
    1336             : 
    1337             :   // Ensure that all pending phantom callbacks are invoked.
    1338       94928 :   isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
    1339             : 
    1340             :   // The VM is in the GC state until exiting this function.
    1341             :   VMState<GC> state(isolate());
    1342             : 
    1343             : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
    1344             :   // Reset the allocation timeout, but make sure to allow at least a few
    1345             :   // allocations after a collection. The reason for this is that we have a lot
    1346             :   // of allocation sequences and we assume that a garbage collection will allow
    1347             :   // the subsequent allocation attempts to go through.
    1348             :   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    1349             :     allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
    1350             :   }
    1351             : #endif
    1352             : 
    1353       94928 :   EnsureFillerObjectAtTop();
    1354             : 
    1355      115901 :   if (IsYoungGenerationCollector(collector) &&
    1356             :       !incremental_marking()->IsStopped()) {
    1357         732 :     if (FLAG_trace_incremental_marking) {
    1358             :       isolate()->PrintWithTimestamp(
    1359           0 :           "[IncrementalMarking] Scavenge during marking.\n");
    1360             :     }
    1361             :   }
    1362             : 
    1363             :   bool next_gc_likely_to_collect_more = false;
    1364             :   size_t committed_memory_before = 0;
    1365             : 
    1366       94928 :   if (collector == MARK_COMPACTOR) {
    1367       73955 :     committed_memory_before = CommittedOldGenerationMemory();
    1368             :   }
    1369             : 
    1370             :   {
    1371      189856 :     tracer()->Start(collector, gc_reason, collector_reason);
    1372             :     DCHECK(AllowHeapAllocation::IsAllowed());
    1373             :     DisallowHeapAllocation no_allocation_during_gc;
    1374       94928 :     GarbageCollectionPrologue();
    1375             : 
    1376             :     {
    1377             :       TimedHistogram* gc_type_timer = GCTypeTimer(collector);
    1378       94928 :       TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
    1379      284784 :       TRACE_EVENT0("v8", gc_type_timer->name());
    1380             : 
    1381       94928 :       TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
    1382             :       OptionalTimedHistogramScopeMode mode =
    1383       94928 :           isolate_->IsMemorySavingsModeActive()
    1384             :               ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
    1385       94928 :               : OptionalTimedHistogramScopeMode::TAKE_TIME;
    1386             :       OptionalTimedHistogramScope histogram_timer_priority_scope(
    1387             :           gc_type_priority_timer, isolate_, mode);
    1388             : 
    1389             :       next_gc_likely_to_collect_more =
    1390       94928 :           PerformGarbageCollection(collector, gc_callback_flags);
    1391       94928 :       if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
    1392       94928 :         tracer()->RecordGCPhasesHistograms(gc_type_timer);
    1393             :       }
    1394             :     }
    1395             : 
    1396             :     // Clear is_current_gc_forced now that the current GC is complete. Do this
    1397             :     // before GarbageCollectionEpilogue() since that could trigger another
    1398             :     // unforced GC.
    1399       94928 :     is_current_gc_forced_ = false;
    1400             : 
    1401       94928 :     GarbageCollectionEpilogue();
    1402       94928 :     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
    1403       73955 :       isolate()->CheckDetachedContextsAfterGC();
    1404             :     }
    1405             : 
    1406       94928 :     if (collector == MARK_COMPACTOR) {
    1407       73955 :       size_t committed_memory_after = CommittedOldGenerationMemory();
    1408       73955 :       size_t used_memory_after = OldGenerationSizeOfObjects();
    1409             :       MemoryReducer::Event event;
    1410       73955 :       event.type = MemoryReducer::kMarkCompact;
    1411       73955 :       event.time_ms = MonotonicallyIncreasingTimeInMs();
    1412             :       // Trigger one more GC if
    1413             :       // - this GC decreased committed memory,
    1414             :       // - there is high fragmentation,
    1415             :       // - there are live detached contexts.
    1416             :       event.next_gc_likely_to_collect_more =
    1417      147675 :           (committed_memory_before > committed_memory_after + MB) ||
    1418      147675 :           HasHighFragmentation(used_memory_after, committed_memory_after) ||
    1419       73955 :           (detached_contexts()->length() > 0);
    1420       73955 :       event.committed_memory = committed_memory_after;
    1421       73955 :       if (deserialization_complete_) {
    1422       73955 :         memory_reducer_->NotifyMarkCompact(event);
    1423             :       }
    1424       73986 :       if (initial_max_old_generation_size_ < max_old_generation_size_ &&
    1425          31 :           used_memory_after < initial_max_old_generation_size_threshold_) {
    1426           4 :         max_old_generation_size_ = initial_max_old_generation_size_;
    1427             :       }
    1428             :     }
    1429             : 
    1430       94928 :     tracer()->Stop(collector);
    1431             :   }
    1432             : 
    1433      168883 :   if (collector == MARK_COMPACTOR &&
    1434       73955 :       (gc_callback_flags & (kGCCallbackFlagForced |
    1435             :                             kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
    1436       33302 :     isolate()->CountUsage(v8::Isolate::kForcedGC);
    1437             :   }
    1438             : 
    1439             :   // Start incremental marking for the next cycle. We do this only for the scavenger
    1440             :   // to avoid a loop where mark-compact causes another mark-compact.
    1441       94928 :   if (IsYoungGenerationCollector(collector)) {
    1442             :     StartIncrementalMarkingIfAllocationLimitIsReached(
    1443             :         GCFlagsForIncrementalMarking(),
    1444       20973 :         kGCCallbackScheduleIdleGarbageCollection);
    1445             :   }
    1446             : 
    1447       94928 :   return next_gc_likely_to_collect_more;
    1448             : }
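
For the mark-compact branch above, the memory reducer is asked to schedule another GC when at least one of three conditions holds: committed memory dropped by more than a megabyte, fragmentation is high, or detached contexts are still alive. A compact restatement of that predicate (hypothetical helper, same logic as event.next_gc_likely_to_collect_more):

    #include <cstddef>

    // Restates the next_gc_likely_to_collect_more computation above.
    bool NextGcLikelyToCollectMore(size_t committed_before, size_t committed_after,
                                   bool has_high_fragmentation,
                                   int live_detached_contexts) {
      const size_t kMB = 1024 * 1024;
      return (committed_before > committed_after + kMB) ||  // this GC shrank memory
             has_high_fragmentation ||                      // lots of wasted pages
             (live_detached_contexts > 0);                  // detached contexts alive
    }
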
    1449             : 
    1450             : 
    1451         650 : int Heap::NotifyContextDisposed(bool dependant_context) {
    1452         650 :   if (!dependant_context) {
    1453          10 :     tracer()->ResetSurvivalEvents();
    1454          10 :     old_generation_size_configured_ = false;
    1455          10 :     old_generation_allocation_limit_ = initial_old_generation_size_;
    1456             :     MemoryReducer::Event event;
    1457          10 :     event.type = MemoryReducer::kPossibleGarbage;
    1458          10 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    1459          10 :     memory_reducer_->NotifyPossibleGarbage(event);
    1460             :   }
    1461         650 :   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    1462             : 
    1463         650 :   number_of_disposed_maps_ = retained_maps()->length();
    1464         650 :   tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
    1465         650 :   return ++contexts_disposed_;
    1466             : }
    1467             : 
    1468        1235 : void Heap::StartIncrementalMarking(int gc_flags,
    1469             :                                    GarbageCollectionReason gc_reason,
    1470             :                                    GCCallbackFlags gc_callback_flags) {
    1471             :   DCHECK(incremental_marking()->IsStopped());
    1472             :   set_current_gc_flags(gc_flags);
    1473       28828 :   current_gc_callback_flags_ = gc_callback_flags;
    1474       28828 :   incremental_marking()->Start(gc_reason);
    1475        1235 : }
    1476             : 
    1477     1639323 : void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
    1478             :     int gc_flags, const GCCallbackFlags gc_callback_flags) {
    1479     1639323 :   if (incremental_marking()->IsStopped()) {
    1480     1412683 :     IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
    1481     1412684 :     if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
    1482        1236 :       incremental_marking()->incremental_marking_job()->ScheduleTask(this);
    1483     1411448 :     } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
    1484             :       StartIncrementalMarking(gc_flags,
    1485             :                               GarbageCollectionReason::kAllocationLimit,
    1486             :                               gc_callback_flags);
    1487             :     }
    1488             :   }
    1489     1639324 : }
    1490             : 
    1491          16 : void Heap::StartIdleIncrementalMarking(
    1492             :     GarbageCollectionReason gc_reason,
    1493             :     const GCCallbackFlags gc_callback_flags) {
    1494             :   StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
    1495             :                           gc_callback_flags);
    1496          16 : }
    1497             : 
    1498        1387 : void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
    1499             :                         WriteBarrierMode mode) {
    1500        2064 :   if (len == 0) return;
    1501             : 
    1502             :   DCHECK_NE(array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
    1503             :   ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
    1504        1387 :   ObjectSlot src = array->RawFieldOfElementAt(src_index);
    1505        2769 :   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
    1506         171 :     if (dst < src) {
    1507      686470 :       for (int i = 0; i < len; i++) {
    1508      343215 :         dst.Relaxed_Store(src.Relaxed_Load());
    1509             :         ++dst;
    1510             :         ++src;
    1511             :       }
    1512             :     } else {
    1513             :       // Copy backwards.
    1514         131 :       dst += len - 1;
    1515             :       src += len - 1;
    1516        2403 :       for (int i = 0; i < len; i++) {
    1517        1136 :         dst.Relaxed_Store(src.Relaxed_Load());
    1518             :         --dst;
    1519             :         --src;
    1520             :       }
    1521             :     }
    1522             :   } else {
    1523        1216 :     MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
    1524             :   }
    1525        1387 :   if (mode == SKIP_WRITE_BARRIER) return;
    1526         710 :   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
    1527             : }
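
When concurrent marking is active, MoveElements above copies slot by slot and must pick a direction that never overwrites source elements it has not read yet: forwards when the destination precedes the source, backwards otherwise. The same direction rule on a plain array (illustrative sketch without atomics or write barriers):

    // Overlap-safe move inside one array, mirroring the direction choice above.
    void MoveOverlapping(int* array, int dst_index, int src_index, int len) {
      int* dst = array + dst_index;
      int* src = array + src_index;
      if (dst < src) {
        for (int i = 0; i < len; i++) dst[i] = src[i];  // copy forwards
      } else {
        for (int i = len - 1; i >= 0; i--) dst[i] = src[i];  // copy backwards
      }
    }
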
    1528             : 
    1529      644358 : void Heap::CopyElements(FixedArray dst, FixedArray src, int dst_index,
    1530             :                         int src_index, int len, WriteBarrierMode mode) {
    1531             :   DCHECK_NE(dst, src);
    1532      644358 :   if (len == 0) return;
    1533             : 
    1534             :   DCHECK_NE(dst->map(), ReadOnlyRoots(this).fixed_cow_array_map());
    1535             :   ObjectSlot dst_slot = dst->RawFieldOfElementAt(dst_index);
    1536             :   ObjectSlot src_slot = src->RawFieldOfElementAt(src_index);
    1537      644358 :   MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
    1538             : 
    1539      644358 :   if (mode == SKIP_WRITE_BARRIER) return;
    1540      617399 :   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, dst, dst_index, len);
    1541             : }
    1542             : 
    1543             : #ifdef VERIFY_HEAP
    1544             : // Helper class for verifying the string table.
    1545             : class StringTableVerifier : public ObjectVisitor {
    1546             :  public:
    1547             :   explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
    1548             : 
    1549             :   void VisitPointers(HeapObject host, ObjectSlot start,
    1550             :                      ObjectSlot end) override {
    1551             :     // Visit all HeapObject pointers in [start, end).
    1552             :     for (ObjectSlot p = start; p < end; ++p) {
    1553             :       DCHECK(!HasWeakHeapObjectTag(*p));
    1554             :       if ((*p)->IsHeapObject()) {
    1555             :         HeapObject object = HeapObject::cast(*p);
    1556             :         // Check that the string is actually internalized.
    1557             :         CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
    1558             :               object->IsInternalizedString());
    1559             :       }
    1560             :     }
    1561             :   }
    1562             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    1563             :                      MaybeObjectSlot end) override {
    1564             :     UNREACHABLE();
    1565             :   }
    1566             : 
    1567             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
    1568             : 
    1569             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    1570             :     UNREACHABLE();
    1571             :   }
    1572             : 
    1573             :  private:
    1574             :   Isolate* isolate_;
    1575             : };
    1576             : 
    1577             : static void VerifyStringTable(Isolate* isolate) {
    1578             :   StringTableVerifier verifier(isolate);
    1579             :   isolate->heap()->string_table()->IterateElements(&verifier);
    1580             : }
    1581             : #endif  // VERIFY_HEAP
    1582             : 
    1583      213546 : bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
    1584             :   bool gc_performed = true;
    1585             :   int counter = 0;
    1586             :   static const int kThreshold = 20;
    1587      427098 :   while (gc_performed && counter++ < kThreshold) {
    1588             :     gc_performed = false;
    1589     1281294 :     for (int space = FIRST_SPACE;
    1590     1494843 :          space < SerializerDeserializer::kNumberOfSpaces; space++) {
    1591     1281320 :       Reservation* reservation = &reservations[space];
    1592             :       DCHECK_LE(1, reservation->size());
    1593     1281320 :       if (reservation->at(0).size == 0) {
    1594             :         DCHECK_EQ(1, reservation->size());
    1595             :         continue;
    1596             :       }
    1597             :       bool perform_gc = false;
    1598      427139 :       if (space == MAP_SPACE) {
    1599             :         // We allocate each map individually to avoid fragmentation.
    1600             :         maps->clear();
    1601             :         DCHECK_LE(reservation->size(), 2);
    1602             :         int reserved_size = 0;
    1603      303724 :         for (const Chunk& c : *reservation) reserved_size += c.size;
    1604             :         DCHECK_EQ(0, reserved_size % Map::kSize);
    1605      151862 :         int num_maps = reserved_size / Map::kSize;
    1606    45409038 :         for (int i = 0; i < num_maps; i++) {
    1607             :           // The deserializer will update the skip list.
    1608             :           AllocationResult allocation = map_space()->AllocateRawUnaligned(
    1609    22628609 :               Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
    1610             :           HeapObject free_space;
    1611    22628626 :           if (allocation.To(&free_space)) {
    1612             :             // Mark with a free list node, in case we have a GC before
    1613             :             // deserializing.
    1614    22628626 :             Address free_space_address = free_space->address();
    1615             :             CreateFillerObjectAt(free_space_address, Map::kSize,
    1616    22628626 :                                  ClearRecordedSlots::kNo);
    1617    22628583 :             maps->push_back(free_space_address);
    1618             :           } else {
    1619             :             perform_gc = true;
    1620           0 :             break;
    1621             :           }
    1622             :         }
    1623      275277 :       } else if (space == LO_SPACE) {
    1624             :         // Just check that we can allocate during deserialization.
    1625             :         DCHECK_LE(reservation->size(), 2);
    1626             :         int reserved_size = 0;
    1627          80 :         for (const Chunk& c : *reservation) reserved_size += c.size;
    1628          40 :         perform_gc = !CanExpandOldGeneration(reserved_size);
    1629             :       } else {
    1630     1727410 :         for (auto& chunk : *reservation) {
    1631             :           AllocationResult allocation;
    1632     1452177 :           int size = chunk.size;
    1633             :           DCHECK_LE(static_cast<size_t>(size),
    1634             :                     MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    1635             :                         static_cast<AllocationSpace>(space)));
    1636     1452177 :           if (space == NEW_SPACE) {
    1637             :             allocation = new_space()->AllocateRawUnaligned(size);
    1638             :           } else {
    1639             :             // The deserializer will update the skip list.
    1640             :             allocation = paged_space(space)->AllocateRawUnaligned(
    1641     1451968 :                 size, PagedSpace::IGNORE_SKIP_LIST);
    1642             :           }
    1643             :           HeapObject free_space;
    1644     1452176 :           if (allocation.To(&free_space)) {
    1645             :             // Mark with a free list node, in case we have a GC before
    1646             :             // deserializing.
    1647             :             Address free_space_address = free_space->address();
    1648             :             CreateFillerObjectAt(free_space_address, size,
    1649     1452174 :                                  ClearRecordedSlots::kNo);
    1650             :             DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
    1651             :                       space);
    1652     1452173 :             chunk.start = free_space_address;
    1653     1452173 :             chunk.end = free_space_address + size;
    1654             :           } else {
    1655             :             perform_gc = true;
    1656             :             break;
    1657             :           }
    1658             :         }
    1659             :       }
    1660      427116 :       if (perform_gc) {
    1661             :         // We cannot perform a GC with an uninitialized isolate. This check
    1662             :         // fails for example if the max old space size is chosen unwisely,
    1663             :         // so that we cannot allocate space to deserialize the initial heap.
    1664           3 :         if (!deserialization_complete_) {
    1665             :           V8::FatalProcessOutOfMemory(
    1666           0 :               isolate(), "insufficient memory to create an Isolate");
    1667             :         }
    1668           3 :         if (space == NEW_SPACE) {
    1669           0 :           CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
    1670             :         } else {
    1671           3 :           if (counter > 1) {
    1672             :             CollectAllGarbage(kReduceMemoryFootprintMask,
    1673             :                               GarbageCollectionReason::kDeserializer);
    1674             :           } else {
    1675             :             CollectAllGarbage(kNoGCFlags,
    1676             :                               GarbageCollectionReason::kDeserializer);
    1677             :           }
    1678             :         }
    1679             :         gc_performed = true;
    1680             :         break;  // Abort for-loop over spaces and retry.
    1681             :       }
    1682             :     }
    1683             :   }
    1684             : 
    1685      213549 :   return !gc_performed;
    1686             : }
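
ReserveSpace above restarts the whole reservation pass after a GC whenever any space fails to provide its chunks, and gives up after kThreshold passes. A skeletal version of that control flow (TryReserve and ForceGcFor are hypothetical stand-ins with stub bodies, not V8 functions):

    // Skeleton of the reserve-or-collect retry loop; both helpers are stand-ins.
    bool TryReserve(int space) { return space >= 0; }  // stub: stand-in only
    void ForceGcFor(int space) { (void)space; }        // stub: stand-in only

    bool ReserveAllSpaces(int num_spaces) {
      static const int kThreshold = 20;
      bool gc_performed = true;
      int counter = 0;
      while (gc_performed && counter++ < kThreshold) {
        gc_performed = false;
        for (int space = 0; space < num_spaces; space++) {
          if (!TryReserve(space)) {
            ForceGcFor(space);
            gc_performed = true;
            break;  // Abort this pass and retry all spaces.
          }
        }
      }
      return !gc_performed;  // true iff the last pass needed no GC
    }
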
    1687             : 
    1688             : 
    1689       94928 : void Heap::EnsureFromSpaceIsCommitted() {
    1690      189856 :   if (new_space_->CommitFromSpaceIfNeeded()) return;
    1691             : 
    1692             :   // Committing memory to from space failed.
    1693             :   // Memory is exhausted and we will die.
    1694           0 :   FatalProcessOutOfMemory("Committing semi space failed.");
    1695             : }
    1696             : 
    1697             : 
    1698       94928 : void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
    1699       94928 :   if (start_new_space_size == 0) return;
    1700             : 
    1701       81877 :   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
    1702       81877 :                       static_cast<double>(start_new_space_size) * 100);
    1703             : 
    1704       81877 :   if (previous_semi_space_copied_object_size_ > 0) {
    1705             :     promotion_rate_ =
    1706       56250 :         (static_cast<double>(promoted_objects_size_) /
    1707       56250 :          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
    1708             :   } else {
    1709       25627 :     promotion_rate_ = 0;
    1710             :   }
    1711             : 
    1712             :   semi_space_copied_rate_ =
    1713       81877 :       (static_cast<double>(semi_space_copied_object_size_) /
    1714       81877 :        static_cast<double>(start_new_space_size) * 100);
    1715             : 
    1716       81877 :   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
    1717       81877 :   tracer()->AddSurvivalRatio(survival_rate);
    1718             : }
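
The ratios above are all expressed as percentages of the young-generation size at the start of the GC. A worked example with made-up figures (8 MB of new space, 2 MB promoted, 3 MB copied within the semispaces):

    #include <cstdio>

    int main() {
      // Hypothetical per-GC figures, in bytes.
      const double start_new_space_size = 8.0 * 1024 * 1024;
      const double promoted_objects_size = 2.0 * 1024 * 1024;
      const double semi_space_copied_size = 3.0 * 1024 * 1024;

      const double promotion_ratio =
          promoted_objects_size / start_new_space_size * 100;  // 25.0
      const double semi_space_copied_rate =
          semi_space_copied_size / start_new_space_size * 100;  // 37.5
      const double survival_rate = promotion_ratio + semi_space_copied_rate;

      std::printf("survival rate: %.1f%%\n", survival_rate);  // 62.5%
      return 0;
    }
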
    1719             : 
    1720       94928 : bool Heap::PerformGarbageCollection(
    1721             :     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
    1722      189856 :   DisallowJavascriptExecution no_js(isolate());
    1723             : 
    1724             :   size_t freed_global_handles = 0;
    1725             : 
    1726       94928 :   if (!IsYoungGenerationCollector(collector)) {
    1727      147910 :     PROFILE(isolate_, CodeMovingGCEvent());
    1728             :   }
    1729             : 
    1730             : #ifdef VERIFY_HEAP
    1731             :   if (FLAG_verify_heap) {
    1732             :     VerifyStringTable(this->isolate());
    1733             :   }
    1734             : #endif
    1735             : 
    1736             :   GCType gc_type =
    1737       94928 :       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
    1738             : 
    1739             :   {
    1740             :     GCCallbacksScope scope(this);
    1741             :     // Temporarily override any embedder stack state as callbacks may create their
    1742             :     // own state on the stack and recursively trigger GC.
    1743             :     EmbedderStackStateScope embedder_scope(
    1744             :         local_embedder_heap_tracer(),
    1745             :         EmbedderHeapTracer::EmbedderStackState::kUnknown);
    1746       94928 :     if (scope.CheckReenter()) {
    1747             :       AllowHeapAllocation allow_allocation;
    1748      189792 :       AllowJavascriptExecution allow_js(isolate());
    1749      379584 :       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
    1750      189792 :       VMState<EXTERNAL> state(isolate_);
    1751       94896 :       HandleScope handle_scope(isolate_);
    1752       94896 :       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
    1753             :     }
    1754             :   }
    1755             : 
    1756       94928 :   EnsureFromSpaceIsCommitted();
    1757             : 
    1758             :   size_t start_young_generation_size =
    1759       94928 :       Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
    1760             : 
    1761             :   {
    1762             :     Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
    1763             : 
    1764       94928 :     switch (collector) {
    1765             :       case MARK_COMPACTOR:
    1766             :         UpdateOldGenerationAllocationCounter();
    1767             :         // Perform mark-sweep with optional compaction.
    1768       73955 :         MarkCompact();
    1769       73955 :         old_generation_size_configured_ = true;
    1770             :         // This should be updated before PostGarbageCollectionProcessing, which
    1771             :         // can cause another GC. Take into account the objects promoted during
    1772             :         // GC.
    1773             :         old_generation_allocation_counter_at_last_gc_ +=
    1774       73955 :             static_cast<size_t>(promoted_objects_size_);
    1775       73955 :         old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
    1776       73955 :         break;
    1777             :       case MINOR_MARK_COMPACTOR:
    1778           0 :         MinorMarkCompact();
    1779           0 :         break;
    1780             :       case SCAVENGER:
    1781       20973 :         if ((fast_promotion_mode_ &&
    1782           0 :              CanExpandOldGeneration(new_space()->Size() +
    1783           0 :                                     new_lo_space()->Size()))) {
    1784             :           tracer()->NotifyYoungGenerationHandling(
    1785           0 :               YoungGenerationHandling::kFastPromotionDuringScavenge);
    1786           0 :           EvacuateYoungGeneration();
    1787             :         } else {
    1788             :           tracer()->NotifyYoungGenerationHandling(
    1789       20973 :               YoungGenerationHandling::kRegularScavenge);
    1790             : 
    1791       20973 :           Scavenge();
    1792             :         }
    1793             :         break;
    1794             :     }
    1795             : 
    1796       94928 :     ProcessPretenuringFeedback();
    1797             :   }
    1798             : 
    1799       94928 :   UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
    1800       94928 :   ConfigureInitialOldGenerationSize();
    1801             : 
    1802       94928 :   if (collector != MARK_COMPACTOR) {
    1803             :     // Objects that died in the new space might have been accounted
    1804             :     // as bytes marked ahead of schedule by the incremental marker.
    1805       20973 :     incremental_marking()->UpdateMarkedBytesAfterScavenge(
    1806       20973 :         start_young_generation_size - SurvivedYoungObjectSize());
    1807             :   }
    1808             : 
    1809       94928 :   if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
    1810       94928 :     ComputeFastPromotionMode();
    1811             :   }
    1812             : 
    1813       94928 :   isolate_->counters()->objs_since_last_young()->Set(0);
    1814             : 
    1815             :   {
    1816      379712 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
    1817             :     // First round weak callbacks are not supposed to allocate and trigger
    1818             :     // nested GCs.
    1819             :     freed_global_handles =
    1820       94928 :         isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
    1821             :   }
    1822             : 
    1823       94928 :   if (collector == MARK_COMPACTOR) {
    1824      295820 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
    1825             :     // TraceEpilogue may trigger operations that invalidate global handles. It
    1826             :     // has to be called *after* all other operations that potentially touch and
    1827             :     // reset global handles. It is also still part of the main garbage
    1828             :     // collection pause and thus needs to be called *before* any operation that
    1829             :     // can potentially trigger recursive garbage collection.
    1830       73955 :     local_embedder_heap_tracer()->TraceEpilogue();
    1831             :   }
    1832             : 
    1833             :   {
    1834      379712 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
    1835       94928 :     gc_post_processing_depth_++;
    1836             :     {
    1837             :       AllowHeapAllocation allow_allocation;
    1838      189856 :       AllowJavascriptExecution allow_js(isolate());
    1839             :       freed_global_handles +=
    1840       94928 :           isolate_->global_handles()->PostGarbageCollectionProcessing(
    1841       94928 :               collector, gc_callback_flags);
    1842             :     }
    1843       94928 :     gc_post_processing_depth_--;
    1844             :   }
    1845             : 
    1846       94928 :   isolate_->eternal_handles()->PostGarbageCollectionProcessing();
    1847             : 
    1848             :   // Update relocatables.
    1849       94928 :   Relocatable::PostGarbageCollectionProcessing(isolate_);
    1850             : 
    1851       94928 :   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
    1852             :   double mutator_speed =
    1853       94928 :       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
    1854       94928 :   size_t old_gen_size = OldGenerationSizeOfObjects();
    1855       94928 :   if (collector == MARK_COMPACTOR) {
    1856             :     // Register the amount of external allocated memory.
    1857             :     isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
    1858       73955 :         isolate()->isolate_data()->external_memory_;
    1859             :     isolate()->isolate_data()->external_memory_limit_ =
    1860       73955 :         isolate()->isolate_data()->external_memory_ +
    1861       73955 :         kExternalAllocationSoftLimit;
    1862             : 
    1863             :     double max_factor =
    1864      147910 :         heap_controller()->MaxGrowingFactor(max_old_generation_size_);
    1865      221865 :     size_t new_limit = heap_controller()->CalculateAllocationLimit(
    1866             :         old_gen_size, max_old_generation_size_, max_factor, gc_speed,
    1867       73955 :         mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
    1868       73955 :     old_generation_allocation_limit_ = new_limit;
    1869             : 
    1870       73955 :     CheckIneffectiveMarkCompact(
    1871       73955 :         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
    1872       22517 :   } else if (HasLowYoungGenerationAllocationRate() &&
    1873        1544 :              old_generation_size_configured_) {
    1874             :     double max_factor =
    1875        1756 :         heap_controller()->MaxGrowingFactor(max_old_generation_size_);
    1876        2634 :     size_t new_limit = heap_controller()->CalculateAllocationLimit(
    1877             :         old_gen_size, max_old_generation_size_, max_factor, gc_speed,
    1878         878 :         mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
    1879         878 :     if (new_limit < old_generation_allocation_limit_) {
    1880          13 :       old_generation_allocation_limit_ = new_limit;
    1881             :     }
    1882             :   }
    1883             : 
    1884             :   {
    1885             :     GCCallbacksScope scope(this);
    1886       94928 :     if (scope.CheckReenter()) {
    1887             :       AllowHeapAllocation allow_allocation;
    1888      189792 :       AllowJavascriptExecution allow_js(isolate());
    1889      379584 :       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
    1890      189792 :       VMState<EXTERNAL> state(isolate_);
    1891       94896 :       HandleScope handle_scope(isolate_);
    1892       94896 :       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
    1893             :     }
    1894             :   }
    1895             : 
    1896             : #ifdef VERIFY_HEAP
    1897             :   if (FLAG_verify_heap) {
    1898             :     VerifyStringTable(this->isolate());
    1899             :   }
    1900             : #endif
    1901             : 
    1902      189856 :   return freed_global_handles > 0;
    1903             : }
    1904             : 
    1905             : 
    1906      122508 : void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
    1907             :   RuntimeCallTimerScope runtime_timer(
    1908      122508 :       isolate(), RuntimeCallCounterId::kGCPrologueCallback);
    1909      122573 :   for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
    1910          65 :     if (gc_type & info.gc_type) {
    1911             :       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
    1912          65 :       info.callback(isolate, gc_type, flags, info.data);
    1913             :     }
    1914             :   }
    1915      122508 : }
    1916             : 
    1917      122508 : void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
    1918             :   RuntimeCallTimerScope runtime_timer(
    1919      122508 :       isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
    1920      245081 :   for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
    1921      122573 :     if (gc_type & info.gc_type) {
    1922             :       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
    1923       74008 :       info.callback(isolate, gc_type, flags, info.data);
    1924             :     }
    1925             :   }
    1926      122508 : }
    1927             : 
    1928             : 
    1929       73955 : void Heap::MarkCompact() {
    1930      147910 :   PauseAllocationObserversScope pause_observers(this);
    1931             : 
    1932             :   SetGCState(MARK_COMPACT);
    1933             : 
    1934       73955 :   LOG(isolate_, ResourceEvent("markcompact", "begin"));
    1935             : 
    1936             :   uint64_t size_of_objects_before_gc = SizeOfObjects();
    1937             : 
    1938      147910 :   CodeSpaceMemoryModificationScope code_modification(this);
    1939             : 
    1940       73955 :   mark_compact_collector()->Prepare();
    1941             : 
    1942       73955 :   ms_count_++;
    1943             : 
    1944       73955 :   MarkCompactPrologue();
    1945             : 
    1946       73955 :   mark_compact_collector()->CollectGarbage();
    1947             : 
    1948       73955 :   LOG(isolate_, ResourceEvent("markcompact", "end"));
    1949             : 
    1950       73955 :   MarkCompactEpilogue();
    1951             : 
    1952       73955 :   if (FLAG_allocation_site_pretenuring) {
    1953       73955 :     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
    1954             :   }
    1955       73955 : }
    1956             : 
    1957           0 : void Heap::MinorMarkCompact() {
    1958             : #ifdef ENABLE_MINOR_MC
    1959             :   DCHECK(FLAG_minor_mc);
    1960             : 
    1961           0 :   PauseAllocationObserversScope pause_observers(this);
    1962             :   SetGCState(MINOR_MARK_COMPACT);
    1963           0 :   LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
    1964             : 
    1965           0 :   TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
    1966             :   AlwaysAllocateScope always_allocate(isolate());
    1967             :   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
    1968             :       incremental_marking());
    1969           0 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    1970             : 
    1971           0 :   minor_mark_compact_collector()->CollectGarbage();
    1972             : 
    1973           0 :   LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
    1974             :   SetGCState(NOT_IN_GC);
    1975             : #else
    1976             :   UNREACHABLE();
    1977             : #endif  // ENABLE_MINOR_MC
    1978           0 : }
    1979             : 
    1980       73955 : void Heap::MarkCompactEpilogue() {
    1981      295820 :   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
    1982             :   SetGCState(NOT_IN_GC);
    1983             : 
    1984       73955 :   isolate_->counters()->objs_since_last_full()->Set(0);
    1985             : 
    1986       73955 :   incremental_marking()->Epilogue();
    1987             : 
    1988             :   DCHECK(incremental_marking()->IsStopped());
    1989       73955 : }
    1990             : 
    1991             : 
    1992       73955 : void Heap::MarkCompactPrologue() {
    1993      295820 :   TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
    1994       73955 :   isolate_->descriptor_lookup_cache()->Clear();
    1995       73955 :   RegExpResultsCache::Clear(string_split_cache());
    1996       73955 :   RegExpResultsCache::Clear(regexp_multiple_cache());
    1997             : 
    1998       73955 :   isolate_->compilation_cache()->MarkCompactPrologue();
    1999             : 
    2000       73955 :   FlushNumberStringCache();
    2001       73955 : }
    2002             : 
    2003             : 
    2004       94928 : void Heap::CheckNewSpaceExpansionCriteria() {
    2005       94928 :   if (FLAG_experimental_new_space_growth_heuristic) {
    2006           0 :     if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
    2007           0 :         survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
    2008             :       // Grow the size of new space if there is room to grow and at least 10%
    2009             :       // of its current capacity survived the last scavenge.
    2010           0 :       new_space_->Grow();
    2011           0 :       survived_since_last_expansion_ = 0;
    2012             :     }
    2013      187848 :   } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
    2014       92920 :              survived_since_last_expansion_ > new_space_->TotalCapacity()) {
    2015             :     // Grow the size of new space if there is room to grow, and enough data
    2016             :     // has survived scavenge since the last expansion.
    2017        1689 :     new_space_->Grow();
    2018        1689 :     survived_since_last_expansion_ = 0;
    2019             :   }
    2020       94928 :   new_lo_space()->SetCapacity(new_space()->Capacity());
    2021       94928 : }
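// A minimal standalone sketch of the two new-space growth heuristics checked
// above, using plain integers instead of the NewSpace API; the Toy* names are
// illustrative only and not part of V8.
#include <cassert>
#include <cstddef>

// Mirrors CheckNewSpaceExpansionCriteria: the experimental heuristic grows the
// space when at least 10% of its current capacity survived the last scavenge;
// the default heuristic grows it when the bytes survived since the last
// expansion exceed the current capacity.
bool ToyShouldGrowNewSpace(size_t total_capacity, size_t maximum_capacity,
                           size_t survived_last_scavenge,
                           size_t survived_since_last_expansion,
                           bool experimental_heuristic) {
  assert(total_capacity > 0);
  if (total_capacity >= maximum_capacity) return false;  // no room to grow
  if (experimental_heuristic) {
    return survived_last_scavenge * 100 / total_capacity >= 10;
  }
  return survived_since_last_expansion > total_capacity;
}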
    2022             : 
    2023           0 : void Heap::EvacuateYoungGeneration() {
    2024           0 :   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
    2025             :   base::MutexGuard guard(relocation_mutex());
    2026           0 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    2027             :   if (!FLAG_concurrent_marking) {
    2028             :     DCHECK(fast_promotion_mode_);
    2029             :     DCHECK(
    2030             :         CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()));
    2031             :   }
    2032             : 
    2033           0 :   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    2034             : 
    2035             :   SetGCState(SCAVENGE);
    2036           0 :   LOG(isolate_, ResourceEvent("scavenge", "begin"));
    2037             : 
    2038             :   // Move pages from new->old generation.
    2039             :   PageRange range(new_space()->first_allocatable_address(), new_space()->top());
    2040           0 :   for (auto it = range.begin(); it != range.end();) {
    2041             :     Page* p = (*++it)->prev_page();
    2042           0 :     new_space()->from_space().RemovePage(p);
    2043           0 :     Page::ConvertNewToOld(p);
    2044           0 :     if (incremental_marking()->IsMarking())
    2045           0 :       mark_compact_collector()->RecordLiveSlotsOnPage(p);
    2046             :   }
    2047             : 
    2048             :   // Reset new space.
    2049           0 :   if (!new_space()->Rebalance()) {
    2050           0 :     FatalProcessOutOfMemory("NewSpace::Rebalance");
    2051             :   }
    2052           0 :   new_space()->ResetLinearAllocationArea();
    2053             :   new_space()->set_age_mark(new_space()->top());
    2054             : 
    2055           0 :   for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
    2056             :     LargePage* page = *it;
    2057             :     // The increment has to happen after we save the page and before the page
    2058             :     // is removed below, since removal would otherwise invalidate the iterator.
    2059             :     it++;
    2060           0 :     lo_space()->PromoteNewLargeObject(page);
    2061             :   }
    2062             : 
    2063             :   // Fix up special trackers.
    2064           0 :   external_string_table_.PromoteYoung();
    2065             :   // GlobalHandles are updated in PostGarbageCollectionProcessing.
    2066             : 
    2067           0 :   size_t promoted = new_space()->Size() + new_lo_space()->Size();
    2068             :   IncrementYoungSurvivorsCounter(promoted);
    2069             :   IncrementPromotedObjectsSize(promoted);
    2070             :   IncrementSemiSpaceCopiedObjectSize(0);
    2071             : 
    2072           0 :   LOG(isolate_, ResourceEvent("scavenge", "end"));
    2073             :   SetGCState(NOT_IN_GC);
    2074           0 : }
    2075             : 
    2076       20973 : void Heap::Scavenge() {
    2077       83892 :   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
    2078             :   base::MutexGuard guard(relocation_mutex());
    2079       41946 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    2080             :   // There are soft limits in the allocation code, designed to trigger a
    2081             :   // mark-sweep collection by failing allocations. There is no sense in trying
    2082             :   // to trigger one during a scavenge: scavenge allocations should always succeed.
    2083             :   AlwaysAllocateScope scope(isolate());
    2084             : 
    2085             :   // Bump-pointer allocations done during scavenge are not real allocations.
    2086             :   // Pause the inline allocation steps.
    2087       41946 :   PauseAllocationObserversScope pause_observers(this);
    2088             :   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
    2089             :       incremental_marking());
    2090             : 
    2091             : 
    2092       20973 :   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    2093             : 
    2094             :   SetGCState(SCAVENGE);
    2095             : 
    2096             :   // Flip the semispaces. After flipping, to space is empty and from space
    2097             :   // holds the live objects.
    2098       20973 :   new_space()->Flip();
    2099       20973 :   new_space()->ResetLinearAllocationArea();
    2100             : 
    2101             :   // We also flip the young generation large object space. All large objects
    2102             :   // will be in the from space.
    2103       20973 :   new_lo_space()->Flip();
    2104             :   new_lo_space()->ResetPendingObject();
    2105             : 
    2106             :   // Implements Cheney's copying algorithm; a standalone toy sketch follows this function.
    2107       20973 :   LOG(isolate_, ResourceEvent("scavenge", "begin"));
    2108             : 
    2109       20973 :   scavenger_collector_->CollectGarbage();
    2110             : 
    2111       20973 :   LOG(isolate_, ResourceEvent("scavenge", "end"));
    2112             : 
    2113             :   SetGCState(NOT_IN_GC);
    2114       20973 : }
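// A self-contained toy sketch of Cheney's copying algorithm that the scavenge
// above is an engineered version of. Everything here (ToyObject, ToyScavenger)
// is illustrative; a real scavenge also handles promotion, write barriers,
// large objects and parallelism.
#include <cstddef>
#include <vector>

struct ToyObject {
  ToyObject* forwarding = nullptr;  // set once the object has been copied
  std::vector<ToyObject*> fields;   // outgoing pointers
};

class ToyScavenger {
 public:
  // Copies everything reachable from |roots| into a fresh to-space and returns
  // it; the old copies are simply abandoned (in V8 the whole from space is
  // reused for the next cycle).
  std::vector<ToyObject*> Scavenge(std::vector<ToyObject*>* roots) {
    to_space_.clear();
    for (ToyObject*& root : *roots) root = Copy(root);
    // Cheney scan pointer: newly copied objects are appended and scanned in
    // turn, giving a breadth-first copy without an explicit marking stack.
    for (size_t scan = 0; scan < to_space_.size(); ++scan) {
      for (ToyObject*& field : to_space_[scan]->fields) field = Copy(field);
    }
    return to_space_;
  }

 private:
  ToyObject* Copy(ToyObject* obj) {
    if (obj == nullptr) return nullptr;
    if (obj->forwarding != nullptr) return obj->forwarding;  // already moved
    ToyObject* clone = new ToyObject(*obj);  // fields are fixed up when scanned
    obj->forwarding = clone;  // leave a forwarding address in the old object
    to_space_.push_back(clone);
    return clone;
  }

  std::vector<ToyObject*> to_space_;
};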
    2115             : 
    2116       94928 : void Heap::ComputeFastPromotionMode() {
    2117             :   const size_t survived_in_new_space =
    2118      189856 :       survived_last_scavenge_ * 100 / new_space_->Capacity();
    2119             :   fast_promotion_mode_ =
    2120      189856 :       !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
    2121       94928 :       !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
    2122       94928 :       survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
    2123       94928 :   if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    2124           0 :     PrintIsolate(
    2125             :         isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
    2126           0 :         fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
    2127             :   }
    2128       94928 : }
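// A worked instance of the fast-promotion check above; the percentage
// threshold is passed in here and is illustrative, whereas the code above uses
// the kMinPromotedPercentForFastPromotionMode constant.
#include <cassert>
#include <cstddef>

bool ToyFastPromotionWorthwhile(size_t survived_last_scavenge,
                                size_t new_space_capacity,
                                size_t min_promoted_percent) {
  assert(new_space_capacity > 0);
  // E.g. 15 MB surviving in a 16 MB new space is 93%: copying nearly every
  // object individually would be wasted work, so evacuating whole pages
  // (EvacuateYoungGeneration above) becomes the cheaper option.
  return survived_last_scavenge * 100 / new_space_capacity >=
         min_promoted_percent;
}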
    2129             : 
    2130     2036974 : void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
    2131     2036974 :   if (unprotected_memory_chunks_registry_enabled_) {
    2132     1824872 :     base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
    2133     1824869 :     if (unprotected_memory_chunks_.insert(chunk).second) {
    2134     1818854 :       chunk->SetReadAndWritable();
    2135             :     }
    2136             :   }
    2137     2036975 : }
    2138             : 
    2139     1210727 : void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object) {
    2140     1943262 :   UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object));
    2141     1210728 : }
    2142             : 
    2143      129739 : void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
    2144             :   unprotected_memory_chunks_.erase(chunk);
    2145      129739 : }
    2146             : 
    2147     1773834 : void Heap::ProtectUnprotectedMemoryChunks() {
    2148             :   DCHECK(unprotected_memory_chunks_registry_enabled_);
    2149     3592692 :   for (auto chunk = unprotected_memory_chunks_.begin();
    2150             :        chunk != unprotected_memory_chunks_.end(); chunk++) {
    2151     3637715 :     CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
    2152     1818858 :     (*chunk)->SetDefaultCodePermissions();
    2153             :   }
    2154             :   unprotected_memory_chunks_.clear();
    2155     1773834 : }
    2156             : 
    2157           0 : bool Heap::ExternalStringTable::Contains(String string) {
    2158           0 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    2159           0 :     if (young_strings_[i] == string) return true;
    2160             :   }
    2161           0 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    2162           0 :     if (old_strings_[i] == string) return true;
    2163             :   }
    2164             :   return false;
    2165             : }
    2166             : 
    2167       87054 : void Heap::UpdateExternalString(String string, size_t old_payload,
    2168             :                                 size_t new_payload) {
    2169             :   DCHECK(string->IsExternalString());
    2170             :   Page* page = Page::FromHeapObject(string);
    2171             : 
    2172       87054 :   if (old_payload > new_payload) {
    2173          17 :     page->DecrementExternalBackingStoreBytes(
    2174          17 :         ExternalBackingStoreType::kExternalString, old_payload - new_payload);
    2175             :   } else {
    2176       87037 :     page->IncrementExternalBackingStoreBytes(
    2177       87037 :         ExternalBackingStoreType::kExternalString, new_payload - old_payload);
    2178             :   }
    2179       87056 : }
    2180             : 
    2181         113 : String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
    2182             :                                                             FullObjectSlot p) {
    2183             :   HeapObject obj = HeapObject::cast(*p);
    2184             :   MapWord first_word = obj->map_word();
    2185             : 
    2186             :   String new_string;
    2187             : 
    2188         113 :   if (InFromPage(obj)) {
    2189         113 :     if (!first_word.IsForwardingAddress()) {
    2190             :       // Unreachable external string can be finalized.
    2191             :       String string = String::cast(obj);
    2192         109 :       if (!string->IsExternalString()) {
    2193             :         // Original external string has been internalized.
    2194             :         DCHECK(string->IsThinString());
    2195           5 :         return String();
    2196             :       }
    2197         104 :       heap->FinalizeExternalString(string);
    2198         104 :       return String();
    2199             :     }
    2200             :     new_string = String::cast(first_word.ToForwardingAddress());
    2201             :   } else {
    2202             :     new_string = String::cast(obj);
    2203             :   }
    2204             : 
    2205             :   // String is still reachable.
    2206           4 :   if (new_string->IsThinString()) {
    2207             :     // Filtering Thin strings out of the external string table.
    2208           0 :     return String();
    2209           4 :   } else if (new_string->IsExternalString()) {
    2210           4 :     MemoryChunk::MoveExternalBackingStoreBytes(
    2211             :         ExternalBackingStoreType::kExternalString,
    2212             :         Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
    2213           8 :         ExternalString::cast(new_string)->ExternalPayloadSize());
    2214           4 :     return new_string;
    2215             :   }
    2216             : 
    2217             :   // Internalization can replace external strings with non-external strings.
    2218           0 :   return new_string->IsExternalString() ? new_string : String();
    2219             : }
    2220             : 
    2221           0 : void Heap::ExternalStringTable::VerifyYoung() {
    2222             : #ifdef DEBUG
    2223             :   std::set<String> visited_map;
    2224             :   std::map<MemoryChunk*, size_t> size_map;
    2225             :   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
    2226             :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    2227             :     String obj = String::cast(young_strings_[i]);
    2228             :     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
    2229             :     DCHECK(mc->InYoungGeneration());
    2230             :     DCHECK(heap_->InYoungGeneration(obj));
    2231             :     DCHECK(!obj->IsTheHole(heap_->isolate()));
    2232             :     DCHECK(obj->IsExternalString());
    2233             :     // Note: we can have repeated elements in the table.
    2234             :     DCHECK_EQ(0, visited_map.count(obj));
    2235             :     visited_map.insert(obj);
    2236             :     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
    2237             :   }
    2238             :   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
    2239             :        it != size_map.end(); it++)
    2240             :     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
    2241             : #endif
    2242           0 : }
    2243             : 
    2244           0 : void Heap::ExternalStringTable::Verify() {
    2245             : #ifdef DEBUG
    2246             :   std::set<String> visited_map;
    2247             :   std::map<MemoryChunk*, size_t> size_map;
    2248             :   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
    2249             :   VerifyYoung();
    2250             :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    2251             :     String obj = String::cast(old_strings_[i]);
    2252             :     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
    2253             :     DCHECK(!mc->InYoungGeneration());
    2254             :     DCHECK(!heap_->InYoungGeneration(obj));
    2255             :     DCHECK(!obj->IsTheHole(heap_->isolate()));
    2256             :     DCHECK(obj->IsExternalString());
    2257             :     // Note: we can have repeated elements in the table.
    2258             :     DCHECK_EQ(0, visited_map.count(obj));
    2259             :     visited_map.insert(obj);
    2260             :     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
    2261             :   }
    2262             :   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
    2263             :        it != size_map.end(); it++)
    2264             :     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
    2265             : #endif
    2266           0 : }
    2267             : 
    2268       94928 : void Heap::ExternalStringTable::UpdateYoungReferences(
    2269             :     Heap::ExternalStringTableUpdaterCallback updater_func) {
    2270       94928 :   if (young_strings_.empty()) return;
    2271             : 
    2272             :   FullObjectSlot start(&young_strings_[0]);
    2273             :   FullObjectSlot end(&young_strings_[young_strings_.size()]);
    2274             :   FullObjectSlot last = start;
    2275             : 
    2276         150 :   for (FullObjectSlot p = start; p < end; ++p) {
    2277         133 :     String target = updater_func(heap_, p);
    2278             : 
    2279         242 :     if (target.is_null()) continue;
    2280             : 
    2281             :     DCHECK(target->IsExternalString());
    2282             : 
    2283          24 :     if (InYoungGeneration(target)) {
    2284             :       // String is still in new space. Update the table entry.
    2285             :       last.store(target);
    2286             :       ++last;
    2287             :     } else {
    2288             :       // String got promoted. Move it to the old string list.
    2289           0 :       old_strings_.push_back(target);
    2290             :     }
    2291             :   }
    2292             : 
    2293             :   DCHECK(last <= end);
    2294          17 :   young_strings_.resize(last - start);
    2295             : #ifdef VERIFY_HEAP
    2296             :   if (FLAG_verify_heap) {
    2297             :     VerifyYoung();
    2298             :   }
    2299             : #endif
    2300             : }
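// A standalone sketch of the in-place compaction pattern used above: entries
// that stay young are packed towards the front with a cursor, promoted entries
// move to the old list, and dead entries are dropped before the vector is
// shrunk. ToyEntry and the updater are illustrative stand-ins for the external
// strings and updater_func; none of this is V8 API.
#include <cstddef>
#include <vector>

struct ToyEntry {
  int id;
  bool alive;        // false models a null target (string died or was finalized)
  bool still_young;  // false models promotion to the old generation
};

template <typename ToyUpdater>
void ToyCompactYoungList(std::vector<ToyEntry>* young,
                         std::vector<ToyEntry>* old_list, ToyUpdater update) {
  size_t last = 0;
  for (size_t i = 0; i < young->size(); ++i) {
    ToyEntry entry = update((*young)[i]);  // may rewrite (forward) the entry
    if (!entry.alive) continue;            // dropped from the table
    if (entry.still_young) {
      (*young)[last++] = entry;            // keep, compacting in place
    } else {
      old_list->push_back(entry);          // promoted: move to the old list
    }
  }
  young->resize(last);                     // discard the now-unused tail
}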
    2301             : 
    2302           0 : void Heap::ExternalStringTable::PromoteYoung() {
    2303           0 :   old_strings_.reserve(old_strings_.size() + young_strings_.size());
    2304             :   std::move(std::begin(young_strings_), std::end(young_strings_),
    2305             :             std::back_inserter(old_strings_));
    2306             :   young_strings_.clear();
    2307           0 : }
    2308             : 
    2309           0 : void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
    2310       75648 :   if (!young_strings_.empty()) {
    2311          32 :     v->VisitRootPointers(
    2312             :         Root::kExternalStringsTable, nullptr,
    2313             :         FullObjectSlot(&young_strings_[0]),
    2314          64 :         FullObjectSlot(&young_strings_[young_strings_.size()]));
    2315             :   }
    2316           0 : }
    2317             : 
    2318       75648 : void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
    2319             :   IterateYoung(v);
    2320       75648 :   if (!old_strings_.empty()) {
    2321       75593 :     v->VisitRootPointers(
    2322             :         Root::kExternalStringsTable, nullptr,
    2323             :         FullObjectSlot(old_strings_.data()),
    2324      151186 :         FullObjectSlot(old_strings_.data() + old_strings_.size()));
    2325             :   }
    2326       75648 : }
    2327             : 
    2328       20973 : void Heap::UpdateYoungReferencesInExternalStringTable(
    2329             :     ExternalStringTableUpdaterCallback updater_func) {
    2330       20973 :   external_string_table_.UpdateYoungReferences(updater_func);
    2331       20973 : }
    2332             : 
    2333       73955 : void Heap::ExternalStringTable::UpdateReferences(
    2334             :     Heap::ExternalStringTableUpdaterCallback updater_func) {
    2335       73955 :   if (old_strings_.size() > 0) {
    2336             :     FullObjectSlot start(old_strings_.data());
    2337             :     FullObjectSlot end(old_strings_.data() + old_strings_.size());
    2338      175203 :     for (FullObjectSlot p = start; p < end; ++p)
    2339      202716 :       p.store(updater_func(heap_, p));
    2340             :   }
    2341             : 
    2342       73955 :   UpdateYoungReferences(updater_func);
    2343       73955 : }
    2344             : 
    2345       73955 : void Heap::UpdateReferencesInExternalStringTable(
    2346             :     ExternalStringTableUpdaterCallback updater_func) {
    2347       73955 :   external_string_table_.UpdateReferences(updater_func);
    2348       73955 : }
    2349             : 
    2350             : 
    2351       73955 : void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
    2352             :   ProcessNativeContexts(retainer);
    2353             :   ProcessAllocationSites(retainer);
    2354       73955 : }
    2355             : 
    2356             : 
    2357       20973 : void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
    2358             :   ProcessNativeContexts(retainer);
    2359       20973 : }
    2360             : 
    2361             : 
    2362           0 : void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
    2363       94928 :   Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
    2364             :   // Update the head of the list of contexts.
    2365             :   set_native_contexts_list(head);
    2366           0 : }
    2367             : 
    2368             : 
    2369           0 : void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
    2370             :   Object allocation_site_obj =
    2371       73955 :       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
    2372             :   set_allocation_sites_list(allocation_site_obj);
    2373           0 : }
    2374             : 
    2375       73955 : void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
    2376       73955 :   set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
    2377       73955 :   set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
    2378       73955 : }
    2379             : 
    2380         200 : void Heap::ForeachAllocationSite(
    2381             :     Object list, const std::function<void(AllocationSite)>& visitor) {
    2382             :   DisallowHeapAllocation disallow_heap_allocation;
    2383         200 :   Object current = list;
    2384        2580 :   while (current->IsAllocationSite()) {
    2385             :     AllocationSite site = AllocationSite::cast(current);
    2386             :     visitor(site);
    2387             :     Object current_nested = site->nested_site();
    2388        1246 :     while (current_nested->IsAllocationSite()) {
    2389             :       AllocationSite nested_site = AllocationSite::cast(current_nested);
    2390             :       visitor(nested_site);
    2391             :       current_nested = nested_site->nested_site();
    2392             :     }
    2393             :     current = site->weak_next();
    2394             :   }
    2395         200 : }
    2396             : 
    2397         100 : void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
    2398             :   DisallowHeapAllocation no_allocation_scope;
    2399         100 :   bool marked = false;
    2400             : 
    2401         100 :   ForeachAllocationSite(allocation_sites_list(),
    2402        1450 :                         [&marked, allocation, this](AllocationSite site) {
    2403        1450 :                           if (site->GetAllocationType() == allocation) {
    2404           0 :                             site->ResetPretenureDecision();
    2405             :                             site->set_deopt_dependent_code(true);
    2406           0 :                             marked = true;
    2407             :                             RemoveAllocationSitePretenuringFeedback(site);
    2408             :                             return;
    2409             :                           }
    2410         100 :                         });
    2411         100 :   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    2412         100 : }
    2413             : 
    2414       73955 : void Heap::EvaluateOldSpaceLocalPretenuring(
    2415             :     uint64_t size_of_objects_before_gc) {
    2416             :   uint64_t size_of_objects_after_gc = SizeOfObjects();
    2417             :   double old_generation_survival_rate =
    2418       73955 :       (static_cast<double>(size_of_objects_after_gc) * 100) /
    2419       73955 :       static_cast<double>(size_of_objects_before_gc);
    2420             : 
    2421       73955 :   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    2422             :     // Too many objects died in the old generation; pretenuring of the wrong
    2423             :     // allocation sites may be the cause. We have to deopt all dependent code
    2424             :     // registered in the allocation sites so that our pretenuring decisions
    2425             :     // are re-evaluated.
    2426         100 :     ResetAllAllocationSitesDependentCode(AllocationType::kOld);
    2427         100 :     if (FLAG_trace_pretenuring) {
    2428             :       PrintF(
    2429             :           "Deopt all allocation sites dependent code due to low survival "
    2430             :           "rate in the old generation %f\n",
    2431           0 :           old_generation_survival_rate);
    2432             :     }
    2433             :   }
    2434       73955 : }
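// A worked instance of the old-generation survival-rate check above. The
// threshold is passed in here for illustration; the code above compares
// against kOldSurvivalRateLowThreshold.
#include <cstdint>

bool ToyPretenuringLooksWrong(uint64_t size_before_gc, uint64_t size_after_gc,
                              double low_threshold_percent) {
  double survival_rate = static_cast<double>(size_after_gc) * 100.0 /
                         static_cast<double>(size_before_gc);
  // E.g. 8 MB of old-space objects surviving out of 100 MB before the GC is an
  // 8% survival rate; if that is below the threshold, pretenured objects died
  // young, so dependent code is deopted and the site decisions are reset.
  return survival_rate < low_threshold_percent;
}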
    2435             : 
    2436             : 
    2437           5 : void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
    2438             :   DisallowHeapAllocation no_allocation;
    2439             :   // All external strings are listed in the external string table.
    2440             : 
    2441           5 :   class ExternalStringTableVisitorAdapter : public RootVisitor {
    2442             :    public:
    2443             :     explicit ExternalStringTableVisitorAdapter(
    2444             :         Isolate* isolate, v8::ExternalResourceVisitor* visitor)
    2445           5 :         : isolate_(isolate), visitor_(visitor) {}
    2446           5 :     void VisitRootPointers(Root root, const char* description,
    2447             :                            FullObjectSlot start, FullObjectSlot end) override {
    2448          35 :       for (FullObjectSlot p = start; p < end; ++p) {
    2449             :         DCHECK((*p)->IsExternalString());
    2450          75 :         visitor_->VisitExternalString(
    2451          50 :             Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
    2452             :       }
    2453           5 :     }
    2454             : 
    2455             :    private:
    2456             :     Isolate* isolate_;
    2457             :     v8::ExternalResourceVisitor* visitor_;
    2458             :   } external_string_table_visitor(isolate(), visitor);
    2459             : 
    2460           5 :   external_string_table_.IterateAll(&external_string_table_visitor);
    2461           5 : }
    2462             : 
    2463             : STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment));
    2464             : 
    2465             : #ifdef V8_COMPRESS_POINTERS
    2466             : // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
    2467             : // is only kTaggedSize aligned but we can keep using unaligned access since
    2468             : // both x64 and arm64 architectures (where pointer compression supported)
    2469             : // allow unaligned access to doubles.
    2470             : STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kTaggedSize));
    2471             : #else
    2472             : STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kDoubleAlignment));
    2473             : #endif
    2474             : 
    2475             : #ifdef V8_HOST_ARCH_32_BIT
    2476             : STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
    2477             : #endif
    2478             : 
    2479             : 
    2480          45 : int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
    2481          45 :   switch (alignment) {
    2482             :     case kWordAligned:
    2483             :       return 0;
    2484             :     case kDoubleAligned:
    2485             :     case kDoubleUnaligned:
    2486          30 :       return kDoubleSize - kTaggedSize;
    2487             :     default:
    2488           0 :       UNREACHABLE();
    2489             :   }
    2490             :   return 0;
    2491             : }
    2492             : 
    2493             : 
    2494    81774691 : int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
    2495    81774691 :   if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
    2496             :     return kTaggedSize;
    2497    81794000 :   if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
    2498             :     return kDoubleSize - kTaggedSize;  // No fill if double is always aligned.
    2499    81793965 :   return 0;
    2500             : }
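// A standalone restatement of the fill computation above, assuming 4-byte
// tagged slots and 8-byte doubles (the layout where double alignment matters);
// the kToy* constants and ToyAlignment are illustrative, not V8's.
#include <cstdint>

constexpr int kToyTaggedSize = 4;
constexpr int kToyDoubleSize = 8;
constexpr uintptr_t kToyDoubleAlignmentMask = kToyDoubleSize - 1;

enum class ToyAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

// Number of filler bytes that must precede an allocation at |address| so that
// the double payload ends up with the requested alignment.
int ToyGetFillToAlign(uintptr_t address, ToyAlignment alignment) {
  if (alignment == ToyAlignment::kDoubleAligned &&
      (address & kToyDoubleAlignmentMask) != 0) {
    return kToyTaggedSize;  // e.g. an object at 0x1004 needs 4 bytes of fill
  }
  if (alignment == ToyAlignment::kDoubleUnaligned &&
      (address & kToyDoubleAlignmentMask) == 0) {
    return kToyDoubleSize - kToyTaggedSize;  // push the payload off 8 bytes
  }
  return 0;  // already suitably aligned, no fill needed
}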
    2501             : 
    2502          20 : HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
    2503          20 :   CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
    2504          40 :   return HeapObject::FromAddress(object->address() + filler_size);
    2505             : }
    2506             : 
    2507           0 : HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
    2508             :                                  int allocation_size,
    2509             :                                  AllocationAlignment alignment) {
    2510           0 :   int filler_size = allocation_size - object_size;
    2511             :   DCHECK_LT(0, filler_size);
    2512             :   int pre_filler = GetFillToAlign(object->address(), alignment);
    2513           0 :   if (pre_filler) {
    2514             :     object = PrecedeWithFiller(object, pre_filler);
    2515           0 :     filler_size -= pre_filler;
    2516             :   }
    2517           0 :   if (filler_size) {
    2518             :     CreateFillerObjectAt(object->address() + object_size, filler_size,
    2519           0 :                          ClearRecordedSlots::kNo);
    2520             :   }
    2521           0 :   return object;
    2522             : }
    2523             : 
    2524      516774 : void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) {
    2525      516774 :   ArrayBufferTracker::RegisterNew(this, buffer);
    2526      516814 : }
    2527             : 
    2528        5391 : void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
    2529        5391 :   ArrayBufferTracker::Unregister(this, buffer);
    2530        5391 : }
    2531             : 
    2532       94928 : void Heap::ConfigureInitialOldGenerationSize() {
    2533      100902 :   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
    2534             :     const size_t new_limit =
    2535       11888 :         Max(OldGenerationSizeOfObjects() +
    2536       11888 :                 heap_controller()->MinimumAllocationLimitGrowingStep(
    2537             :                     CurrentHeapGrowingMode()),
    2538             :             static_cast<size_t>(
    2539       11888 :                 static_cast<double>(old_generation_allocation_limit_) *
    2540        5944 :                 (tracer()->AverageSurvivalRatio() / 100)));
    2541        5944 :     if (new_limit < old_generation_allocation_limit_) {
    2542        5395 :       old_generation_allocation_limit_ = new_limit;
    2543             :     } else {
    2544         549 :       old_generation_size_configured_ = true;
    2545             :     }
    2546             :   }
    2547       94928 : }
    2548             : 
    2549       73955 : void Heap::FlushNumberStringCache() {
    2550             :   // Flush the number to string cache.
    2551             :   int len = number_string_cache()->length();
    2552   648785123 :   for (int i = 0; i < len; i++) {
    2553   324355584 :     number_string_cache()->set_undefined(i);
    2554             :   }
    2555       73955 : }
    2556             : 
    2557    93337331 : HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
    2558             :                                       ClearRecordedSlots clear_slots_mode,
    2559             :                                       ClearFreedMemoryMode clear_memory_mode) {
    2560    93337331 :   if (size == 0) return HeapObject();
    2561             :   HeapObject filler = HeapObject::FromAddress(addr);
    2562    92492804 :   if (size == kTaggedSize) {
    2563             :     filler->set_map_after_allocation(
    2564             :         Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
    2565             :         SKIP_WRITE_BARRIER);
    2566    87267851 :   } else if (size == 2 * kTaggedSize) {
    2567             :     filler->set_map_after_allocation(
    2568             :         Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
    2569             :         SKIP_WRITE_BARRIER);
    2570     2912936 :     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
    2571       10403 :       Memory<Tagged_t>(addr + kTaggedSize) =
    2572       10403 :           static_cast<Tagged_t>(kClearedFreeMemoryValue);
    2573             :     }
    2574             :   } else {
    2575             :     DCHECK_GT(size, 2 * kTaggedSize);
    2576             :     filler->set_map_after_allocation(
    2577             :         Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
    2578             :         SKIP_WRITE_BARRIER);
    2579             :     FreeSpace::cast(filler)->relaxed_write_size(size);
    2580    84354915 :     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
    2581       89500 :       MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
    2582       89500 :                    (size / kTaggedSize) - 2);
    2583             :     }
    2584             :   }
    2585    92472728 :   if (clear_slots_mode == ClearRecordedSlots::kYes) {
    2586     1935826 :     ClearRecordedSlotRange(addr, addr + size);
    2587             :   }
    2588             : 
    2589             :   // At this point we may be deserializing the heap from a snapshot, and
    2590             :   // none of the maps have been created yet; the filler's map is still null.
    2591             :   DCHECK((filler->map_slot().contains_value(kNullAddress) &&
    2592             :           !deserialization_complete_) ||
    2593             :          filler->map()->IsMap());
    2594    92472728 :   return filler;
    2595             : }
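// A toy restatement of the size-based dispatch above: one- and two-slot gaps
// are too small to store an explicit size, so they get dedicated filler kinds,
// while anything larger becomes a free-space record that carries its own size.
// A 4-byte slot and plain enum stand in for V8's kTaggedSize and filler maps.
#include <cassert>
#include <cstddef>

enum class ToyFillerKind { kOneSlot, kTwoSlot, kFreeSpace };

ToyFillerKind ToyFillerFor(size_t size_in_bytes) {
  constexpr size_t kToySlotSize = 4;
  assert(size_in_bytes > 0 && size_in_bytes % kToySlotSize == 0);
  if (size_in_bytes == kToySlotSize) return ToyFillerKind::kOneSlot;
  if (size_in_bytes == 2 * kToySlotSize) return ToyFillerKind::kTwoSlot;
  return ToyFillerKind::kFreeSpace;  // its body additionally records the size
}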
    2596             : 
    2597      181562 : bool Heap::CanMoveObjectStart(HeapObject object) {
    2598      181562 :   if (!FLAG_move_object_start) return false;
    2599             : 
    2600             :   // Sampling heap profiler may have a reference to the object.
    2601      181562 :   if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
    2602             : 
    2603      181562 :   if (IsLargeObject(object)) return false;
    2604             : 
    2605             :   // We can move the object start if the page was already swept.
    2606      181188 :   return Page::FromHeapObject(object)->SweepingDone();
    2607             : }
    2608             : 
    2609       44028 : bool Heap::IsImmovable(HeapObject object) {
    2610             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    2611      167577 :   return chunk->NeverEvacuate() || IsLargeObject(object);
    2612             : }
    2613             : 
    2614      797881 : bool Heap::IsLargeObject(HeapObject object) {
    2615      797881 :   return MemoryChunk::FromHeapObject(object)->IsLargePage();
    2616             : }
    2617             : 
    2618             : #ifdef ENABLE_SLOW_DCHECKS
    2619             : namespace {
    2620             : 
    2621             : class LeftTrimmerVerifierRootVisitor : public RootVisitor {
    2622             :  public:
    2623             :   explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
    2624             :       : to_check_(to_check) {}
    2625             : 
    2626             :   void VisitRootPointers(Root root, const char* description,
    2627             :                          FullObjectSlot start, FullObjectSlot end) override {
    2628             :     for (FullObjectSlot p = start; p < end; ++p) {
    2629             :       DCHECK_NE(*p, to_check_);
    2630             :     }
    2631             :   }
    2632             : 
    2633             :  private:
    2634             :   FixedArrayBase to_check_;
    2635             : 
    2636             :   DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
    2637             : };
    2638             : }  // namespace
    2639             : #endif  // ENABLE_SLOW_DCHECKS
    2640             : 
    2641             : namespace {
    2642       45720 : bool MayContainRecordedSlots(HeapObject object) {
    2643             :   // New space object do not have recorded slots.
    2644       45720 :   if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
    2645             :   // Whitelist objects that definitely do not have pointers.
    2646        6095 :   if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
    2647             :   // Conservatively return true for other objects.
    2648        3047 :   return true;
    2649             : }
    2650             : }  // namespace
    2651             : 
    2652     1145300 : void Heap::OnMoveEvent(HeapObject target, HeapObject source,
    2653             :                        int size_in_bytes) {
    2654     1145300 :   HeapProfiler* heap_profiler = isolate_->heap_profiler();
    2655     1145300 :   if (heap_profiler->is_tracking_object_moves()) {
    2656             :     heap_profiler->ObjectMoveEvent(source->address(), target->address(),
    2657      132565 :                                    size_in_bytes);
    2658             :   }
    2659     1284094 :   for (auto& tracker : allocation_trackers_) {
    2660      272562 :     tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
    2661             :   }
    2662     1147813 :   if (target->IsSharedFunctionInfo()) {
    2663        4316 :     LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
    2664             :                                                          target->address()));
    2665             :   }
    2666             : 
    2667             :   if (FLAG_verify_predictable) {
    2668             :     ++allocations_count_;
    2669             :     // Advance synthetic time by making a time request.
    2670             :     MonotonicallyIncreasingTimeInMs();
    2671             : 
    2672             :     UpdateAllocationsHash(source);
    2673             :     UpdateAllocationsHash(target);
    2674             :     UpdateAllocationsHash(size_in_bytes);
    2675             : 
    2676             :     if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
    2677             :       PrintAllocationsHash();
    2678             :     }
    2679     1147351 :   } else if (FLAG_fuzzer_gc_analysis) {
    2680           0 :     ++allocations_count_;
    2681             :   }
    2682     1147351 : }
    2683             : 
    2684      181268 : FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
    2685             :                                         int elements_to_trim) {
    2686      181268 :   if (elements_to_trim == 0) {
    2687             :     // This simplifies reasoning in the rest of the function.
    2688           0 :     return object;
    2689             :   }
    2690      181268 :   CHECK(!object.is_null());
    2691             :   DCHECK(CanMoveObjectStart(object));
    2692             :   // Add custom visitor to concurrent marker if new left-trimmable type
    2693             :   // is added.
    2694             :   DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
    2695      181268 :   const int element_size = object->IsFixedArray() ? kTaggedSize : kDoubleSize;
    2696      181268 :   const int bytes_to_trim = elements_to_trim * element_size;
    2697             :   Map map = object->map();
    2698             : 
    2699             :   // For now this trick is only applied to fixed arrays, which may be in new
    2700             :   // space or old space. In a large object space the object's start must
    2701             :   // coincide with the chunk start, so the trick is not applicable there.
    2702             :   DCHECK(!IsLargeObject(object));
    2703             :   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    2704             : 
    2705             :   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
    2706             :   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
    2707             :   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
    2708             : 
    2709             :   const int len = object->length();
    2710             :   DCHECK(elements_to_trim <= len);
    2711             : 
    2712             :   // Calculate location of new array start.
    2713             :   Address old_start = object->address();
    2714      181268 :   Address new_start = old_start + bytes_to_trim;
    2715             : 
    2716      181268 :   if (incremental_marking()->IsMarking()) {
    2717             :     incremental_marking()->NotifyLeftTrimming(
    2718         365 :         object, HeapObject::FromAddress(new_start));
    2719             :   }
    2720             : 
    2721             :   // Technically in new space this write might be omitted (except in
    2722             :   // debug mode, which iterates through the heap), but to play it safe
    2723             :   // we still do it.
    2724             :   HeapObject filler =
    2725      181268 :       CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
    2726             : 
    2727             :   // Initialize the header of the trimmed array. Since left trimming is only
    2728             :   // performed on pages which are not concurrently swept, creating a filler
    2729             :   // object does not require synchronization.
    2730      181268 :   RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
    2731      362536 :   RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
    2732             :                       Smi::FromInt(len - elements_to_trim));
    2733             : 
    2734             :   FixedArrayBase new_object =
    2735      181268 :       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
    2736             : 
    2737             :   // Remove recorded slots for the new map and length offset.
    2738             :   ClearRecordedSlot(new_object, new_object.RawField(0));
    2739             :   ClearRecordedSlot(new_object,
    2740             :                     new_object.RawField(FixedArrayBase::kLengthOffset));
    2741             : 
    2742             :   // Handle invalidated old-to-old slots.
    2743      181273 :   if (incremental_marking()->IsCompacting() &&
    2744           5 :       MayContainRecordedSlots(new_object)) {
    2745             :     // If the array was right-trimmed before, then it is registered in
    2746             :     // the invalidated_slots.
    2747             :     MemoryChunk::FromHeapObject(new_object)
    2748           5 :         ->MoveObjectWithInvalidatedSlots(filler, new_object);
    2749             :     // We have to clear slots in the free space to avoid stale old-to-old
    2750             :     // slots. Note we cannot use the ClearFreedMemoryMode of
    2751             :     // CreateFillerObjectAt because we need pointer-granularity writes to
    2752             :     // avoid races with concurrent marking.
    2753           5 :     if (filler->Size() > FreeSpace::kSize) {
    2754           5 :       MemsetTagged(filler.RawField(FreeSpace::kSize),
    2755             :                    ReadOnlyRoots(this).undefined_value(),
    2756           5 :                    (filler->Size() - FreeSpace::kSize) / kTaggedSize);
    2757             :     }
    2758             :   }
    2759             :   // Notify the heap profiler of the change in object layout.
    2760      181268 :   OnMoveEvent(new_object, object, new_object->Size());
    2761             : 
    2762             : #ifdef ENABLE_SLOW_DCHECKS
    2763             :   if (FLAG_enable_slow_asserts) {
    2764             :     // Make sure the stack or other roots (e.g., Handles) don't contain pointers
    2765             :     // to the original FixedArray (which is now the filler object).
    2766             :     LeftTrimmerVerifierRootVisitor root_visitor(object);
    2767             :     ReadOnlyRoots(this).Iterate(&root_visitor);
    2768             :     IterateRoots(&root_visitor, VISIT_ALL);
    2769             :   }
    2770             : #endif  // ENABLE_SLOW_DCHECKS
    2771             : 
    2772      181268 :   return new_object;
    2773             : }
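// A self-contained toy illustration of the left-trim layout change above: the
// vacated prefix becomes a filler and a fresh (map, length) header is written
// at the new start, so both the old and the new address stay iterable. Plain
// ints stand in for tagged values; nothing here is V8 API.
#include <cassert>
#include <vector>

void ToyLeftTrimExample() {
  const int kToyMap = 42, kToyFiller = 0;
  // Layout of a 5-element toy array: [map, length, e0, e1, e2, e3, e4].
  std::vector<int> slots = {kToyMap, 5, 10, 11, 12, 13, 14};
  const int elements_to_trim = 2;  // header fields and elements are one slot each

  // 1. Overwrite the vacated prefix with a filler so a linear heap walk that
  //    starts at the old address still sees a valid object.
  for (int i = 0; i < elements_to_trim; ++i) slots[i] = kToyFiller;

  // 2. Write the new header elements_to_trim slots further on; the new object
  //    keeps the surviving elements e2..e4 in place without copying them.
  slots[elements_to_trim] = kToyMap;
  slots[elements_to_trim + 1] = 5 - elements_to_trim;

  assert(slots[elements_to_trim + 1] == 3);  // the trimmed array has length 3
}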
    2774             : 
    2775     1437491 : void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
    2776             :   const int len = object->length();
    2777             :   DCHECK_LE(elements_to_trim, len);
    2778             :   DCHECK_GE(elements_to_trim, 0);
    2779             : 
    2780             :   int bytes_to_trim;
    2781             :   DCHECK(!object->IsFixedTypedArrayBase());
    2782     1437491 :   if (object->IsByteArray()) {
    2783        5115 :     int new_size = ByteArray::SizeFor(len - elements_to_trim);
    2784        5115 :     bytes_to_trim = ByteArray::SizeFor(len) - new_size;
    2785             :     DCHECK_GE(bytes_to_trim, 0);
    2786     1432376 :   } else if (object->IsFixedArray()) {
    2787     1409621 :     CHECK_NE(elements_to_trim, len);
    2788     1409621 :     bytes_to_trim = elements_to_trim * kTaggedSize;
    2789             :   } else {
    2790             :     DCHECK(object->IsFixedDoubleArray());
    2791       22755 :     CHECK_NE(elements_to_trim, len);
    2792       22755 :     bytes_to_trim = elements_to_trim * kDoubleSize;
    2793             :   }
    2794             : 
    2795     1437491 :   CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
    2796     1437491 : }
    2797             : 
    2798       17448 : void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
    2799             :                                    int elements_to_trim) {
    2800             :   // This function is safe to use only at the end of the mark-compact
    2801             :   // collection: when marking, we record the weak slots, and shrinking
    2802             :   // invalidates them.
    2803             :   DCHECK_EQ(gc_state(), MARK_COMPACT);
    2804       17448 :   CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
    2805       17448 :                                        elements_to_trim * kTaggedSize);
    2806       17448 : }
    2807             : 
    2808             : template <typename T>
    2809     1454939 : void Heap::CreateFillerForArray(T object, int elements_to_trim,
    2810             :                                 int bytes_to_trim) {
    2811             :   DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
    2812             :          object->IsWeakFixedArray());
    2813             : 
    2814             :   // For now this trick is only applied to objects in new and paged space.
    2815             :   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    2816             : 
    2817     1454939 :   if (bytes_to_trim == 0) {
    2818             :     DCHECK_EQ(elements_to_trim, 0);
    2819             :     // No need to create filler and update live bytes counters.
    2820             :     return;
    2821             :   }
    2822             : 
    2823             :   // Calculate location of new array end.
    2824     1454939 :   int old_size = object->Size();
    2825     1454939 :   Address old_end = object->address() + old_size;
    2826     1454939 :   Address new_end = old_end - bytes_to_trim;
    2827             : 
    2828             :   // Register the array as an object with invalidated old-to-old slots. We
    2829             :   // cannot use NotifyObjectLayoutChange as it would mark the array black,
    2830             :   // which is not safe for left-trimming because left-trimming re-pushes
    2831             :   // only grey arrays onto the marking worklist.
    2832     1456160 :   if (incremental_marking()->IsCompacting() &&
    2833        1221 :       MayContainRecordedSlots(object)) {
    2834             :     // Ensure that the object survives because the InvalidatedSlotsFilter will
    2835             :     // compute its size from its map during pointers updating phase.
    2836          35 :     incremental_marking()->WhiteToGreyAndPush(object);
    2837          35 :     MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
    2838             :         object, old_size);
    2839             :   }
    2840             : 
    2841             :   // Technically in new space this write might be omitted (except in
    2842             :   // debug mode, which iterates through the heap), but to play it safe
    2843             :   // we still do it.
    2844             :   // We do not create a filler for objects in a large object space.
    2845     1454939 :   if (!IsLargeObject(object)) {
    2846             :     HeapObject filler =
    2847     1454492 :         CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
    2848             :     DCHECK(!filler.is_null());
    2849             :     // Clear the mark bits of the black area that belongs now to the filler.
    2850             :     // This is an optimization. The sweeper will release black fillers anyway.
    2851     1579465 :     if (incremental_marking()->black_allocation() &&
    2852             :         incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
    2853             :       Page* page = Page::FromAddress(new_end);
    2854         314 :       incremental_marking()->marking_state()->bitmap(page)->ClearRange(
    2855             :           page->AddressToMarkbitIndex(new_end),
    2856             :           page->AddressToMarkbitIndex(new_end + bytes_to_trim));
    2857             :     }
    2858             :   }
    2859             : 
    2860             :   // Initialize the header of the trimmed array. We store the new length
    2861             :   // using a release store, after creating a filler for the left-over space,
    2862             :   // to avoid races with the sweeper thread.
    2863     1454939 :   object->synchronized_set_length(object->length() - elements_to_trim);
    2864             : 
    2865             :   // Notify the heap object allocation tracker of the change in object layout.
    2866             :   // The array may not be moved during GC, but its size still has to be adjusted.
    2867     1457826 :   for (auto& tracker : allocation_trackers_) {
    2868        5774 :     tracker->UpdateObjectSizeEvent(object->address(), object->Size());
    2869             :   }
    2870             : }
    2871             : 
    2872           0 : void Heap::MakeHeapIterable() {
    2873        7820 :   mark_compact_collector()->EnsureSweepingCompleted();
    2874           0 : }
    2875             : 
    2876             : 
    2877             : static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
    2878             :   const double kMinMutatorUtilization = 0.0;
    2879             :   const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
    2880       21012 :   if (mutator_speed == 0) return kMinMutatorUtilization;
    2881       19050 :   if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
    2882             :   // Derivation:
    2883             :   // mutator_utilization = mutator_time / (mutator_time + gc_time)
    2884             :   // mutator_time = 1 / mutator_speed
    2885             :   // gc_time = 1 / gc_speed
    2886             :   // mutator_utilization = (1 / mutator_speed) /
    2887             :   //                       (1 / mutator_speed + 1 / gc_speed)
    2888             :   // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
    2889       19050 :   return gc_speed / (mutator_speed + gc_speed);
    2890             : }
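// A worked instance of the derivation above, with illustrative speeds only.
#include <cassert>
#include <cmath>

void ToyMutatorUtilizationExample() {
  const double mutator_speed = 1e6;  // bytes allocated per ms of mutator time
  const double gc_speed = 2e5;       // bytes processed per ms of GC time
  const double utilization = gc_speed / (mutator_speed + gc_speed);
  // 2e5 / (1e6 + 2e5) = 1/6, roughly 0.167: the mutator would get only about a
  // sixth of the wall-clock time, far below the 0.993 bar that the low
  // allocation rate predicates below use.
  assert(std::fabs(utilization - 1.0 / 6.0) < 1e-12);
}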
    2891             : 
    2892             : 
    2893       20996 : double Heap::YoungGenerationMutatorUtilization() {
    2894             :   double mutator_speed = static_cast<double>(
    2895       20996 :       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
    2896             :   double gc_speed =
    2897       20996 :       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
    2898             :   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
    2899       20996 :   if (FLAG_trace_mutator_utilization) {
    2900             :     isolate()->PrintWithTimestamp(
    2901             :         "Young generation mutator utilization = %.3f ("
    2902             :         "mutator_speed=%.f, gc_speed=%.f)\n",
    2903           0 :         result, mutator_speed, gc_speed);
    2904             :   }
    2905       20996 :   return result;
    2906             : }
    2907             : 
    2908             : 
    2909          16 : double Heap::OldGenerationMutatorUtilization() {
    2910             :   double mutator_speed = static_cast<double>(
    2911          16 :       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
    2912             :   double gc_speed = static_cast<double>(
    2913          16 :       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
    2914             :   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
    2915          16 :   if (FLAG_trace_mutator_utilization) {
    2916             :     isolate()->PrintWithTimestamp(
    2917             :         "Old generation mutator utilization = %.3f ("
    2918             :         "mutator_speed=%.f, gc_speed=%.f)\n",
    2919           0 :         result, mutator_speed, gc_speed);
    2920             :   }
    2921          16 :   return result;
    2922             : }
    2923             : 
    2924             : 
    2925           0 : bool Heap::HasLowYoungGenerationAllocationRate() {
    2926             :   const double high_mutator_utilization = 0.993;
    2927       20996 :   return YoungGenerationMutatorUtilization() > high_mutator_utilization;
    2928             : }
    2929             : 
    2930             : 
    2931           0 : bool Heap::HasLowOldGenerationAllocationRate() {
    2932             :   const double high_mutator_utilization = 0.993;
    2933          16 :   return OldGenerationMutatorUtilization() > high_mutator_utilization;
    2934             : }
    2935             : 
    2936             : 
    2937          23 : bool Heap::HasLowAllocationRate() {
    2938          39 :   return HasLowYoungGenerationAllocationRate() &&
    2939          23 :          HasLowOldGenerationAllocationRate();
    2940             : }
    2941             : 
    2942           0 : bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
    2943             :                                     double mutator_utilization) {
    2944             :   const double kHighHeapPercentage = 0.8;
    2945             :   const double kLowMutatorUtilization = 0.4;
    2946       73254 :   return old_generation_size >=
    2947       73254 :              kHighHeapPercentage * max_old_generation_size_ &&
    2948           0 :          mutator_utilization < kLowMutatorUtilization;
    2949             : }
    2950             : 
    2951       73955 : void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
    2952             :                                        double mutator_utilization) {
    2953             :   const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
    2954       73955 :   if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
    2955       73254 :   if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
    2956       73252 :     consecutive_ineffective_mark_compacts_ = 0;
    2957       73252 :     return;
    2958             :   }
    2959           2 :   ++consecutive_ineffective_mark_compacts_;
    2960           2 :   if (consecutive_ineffective_mark_compacts_ ==
    2961             :       kMaxConsecutiveIneffectiveMarkCompacts) {
    2962           0 :     if (InvokeNearHeapLimitCallback()) {
    2963             :       // The callback increased the heap limit.
    2964           0 :       consecutive_ineffective_mark_compacts_ = 0;
    2965           0 :       return;
    2966             :     }
    2967           0 :     FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
    2968             :   }
    2969             : }
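To make the thresholds above concrete (the heap limit here is an illustrative value, not a default documented in this file): with max_old_generation_size_ = 256 MB, a mark-compact that still leaves at least 0.8 * 256 MB ≈ 205 MB of old-generation objects while mutator utilization stays below 0.4 counts as ineffective; four such collections in a row either raise the limit via InvokeNearHeapLimitCallback() or abort with "Ineffective mark-compacts near heap limit".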
    2970             : 
    2971           0 : bool Heap::HasHighFragmentation() {
    2972           0 :   size_t used = OldGenerationSizeOfObjects();
    2973           0 :   size_t committed = CommittedOldGenerationMemory();
    2974           0 :   return HasHighFragmentation(used, committed);
    2975             : }
    2976             : 
    2977           0 : bool Heap::HasHighFragmentation(size_t used, size_t committed) {
    2978             :   const size_t kSlack = 16 * MB;
    2979             :   // Fragmentation is high if committed > 2 * used + kSlack.
    2980             :   // Rewrite the expression to avoid overflow.
    2981             :   DCHECK_GE(committed, used);
    2982       73720 :   return committed - used > used + kSlack;
    2983             : }
    2984             : 
    2985     1839934 : bool Heap::ShouldOptimizeForMemoryUsage() {
    2986     1839934 :   const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
    2987     1839934 :   return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
    2988     5519802 :          isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
    2989     3679863 :          !CanExpandOldGeneration(kOldGenerationSlack);
    2990             : }
    2991             : 
    2992           0 : void Heap::ActivateMemoryReducerIfNeeded() {
    2993             :   // Activate the memory reducer when switching to the background if:
    2994             :   // - there has been no mark-compact since start-up, and
    2995             :   // - the committed memory can potentially be reduced.
    2996             :   // 2 pages for each of the old, code, and map spaces + 1 page for new space.
    2997             :   const int kMinCommittedMemory = 7 * Page::kPageSize;
    2998           0 :   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
    2999             :       isolate()->IsIsolateInBackground()) {
    3000             :     MemoryReducer::Event event;
    3001           0 :     event.type = MemoryReducer::kPossibleGarbage;
    3002           0 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    3003           0 :     memory_reducer_->NotifyPossibleGarbage(event);
    3004             :   }
    3005           0 : }
    3006             : 
    3007       94928 : void Heap::ReduceNewSpaceSize() {
    3008             :   // TODO(ulan): Unify this constant with the similar constant in
    3009             :   // GCIdleTimeHandler once the change is merged to 4.5.
    3010             :   static const size_t kLowAllocationThroughput = 1000;
    3011             :   const double allocation_throughput =
    3012       94928 :       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
    3013             : 
    3014       94928 :   if (FLAG_predictable) return;
    3015             : 
    3016      189276 :   if (ShouldReduceMemory() ||
    3017       66264 :       ((allocation_throughput != 0) &&
    3018             :        (allocation_throughput < kLowAllocationThroughput))) {
    3019       25027 :     new_space_->Shrink();
    3020       50054 :     new_lo_space_->SetCapacity(new_space_->Capacity());
    3021             :     UncommitFromSpace();
    3022             :   }
    3023             : }
    3024             : 
    3025       30149 : void Heap::FinalizeIncrementalMarkingIfComplete(
    3026             :     GarbageCollectionReason gc_reason) {
    3027       89855 :   if (incremental_marking()->IsMarking() &&
    3028       17718 :       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
    3029        4446 :        (!incremental_marking()->finalize_marking_completed() &&
    3030        4775 :         mark_compact_collector()->marking_worklist()->IsEmpty() &&
    3031         329 :         local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
    3032       12168 :     FinalizeIncrementalMarkingIncrementally(gc_reason);
    3033       40764 :   } else if (incremental_marking()->IsComplete() ||
    3034        5421 :              (mark_compact_collector()->marking_worklist()->IsEmpty() &&
    3035             :               local_embedder_heap_tracer()
    3036         619 :                   ->ShouldFinalizeIncrementalMarking())) {
    3037       13798 :     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
    3038             :   }
    3039       30149 : }
    3040             : 
    3041           5 : void Heap::FinalizeIncrementalMarkingAtomically(
    3042             :     GarbageCollectionReason gc_reason) {
    3043             :   DCHECK(!incremental_marking()->IsStopped());
    3044        2705 :   CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
    3045           5 : }
    3046             : 
    3047       19113 : void Heap::FinalizeIncrementalMarkingIncrementally(
    3048             :     GarbageCollectionReason gc_reason) {
    3049       19113 :   if (FLAG_trace_incremental_marking) {
    3050           0 :     isolate()->PrintWithTimestamp(
    3051             :         "[IncrementalMarking] (%s).\n",
    3052           0 :         Heap::GarbageCollectionReasonToString(gc_reason));
    3053             :   }
    3054             : 
    3055             :   HistogramTimerScope incremental_marking_scope(
    3056             :       isolate()->counters()->gc_incremental_marking_finalize());
    3057       57339 :   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
    3058       76452 :   TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
    3059             : 
    3060             :   {
    3061             :     GCCallbacksScope scope(this);
    3062       19113 :     if (scope.CheckReenter()) {
    3063             :       AllowHeapAllocation allow_allocation;
    3064       76452 :       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
    3065       38226 :       VMState<EXTERNAL> state(isolate_);
    3066       19113 :       HandleScope handle_scope(isolate_);
    3067       19113 :       CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    3068             :     }
    3069             :   }
    3070       19113 :   incremental_marking()->FinalizeIncrementally();
    3071             :   {
    3072             :     GCCallbacksScope scope(this);
    3073       19113 :     if (scope.CheckReenter()) {
    3074             :       AllowHeapAllocation allow_allocation;
    3075       76452 :       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
    3076       38226 :       VMState<EXTERNAL> state(isolate_);
    3077       19113 :       HandleScope handle_scope(isolate_);
    3078       19113 :       CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    3079             :     }
    3080             :   }
    3081       19113 : }
    3082             : 
    3083       90593 : void Heap::RegisterDeserializedObjectsForBlackAllocation(
    3084             :     Reservation* reservations, const std::vector<HeapObject>& large_objects,
    3085             :     const std::vector<Address>& maps) {
    3086             :   // TODO(ulan): pause black allocation during deserialization to avoid
    3087             :   // iterating all these objects in one go.
    3088             : 
    3089       90593 :   if (!incremental_marking()->black_allocation()) return;
    3090             : 
    3091             :   // Iterate black objects in old space, code space, map space, and large
    3092             :   // object space for side effects.
    3093             :   IncrementalMarking::MarkingState* marking_state =
    3094             :       incremental_marking()->marking_state();
    3095      119960 :   for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
    3096       53315 :     const Heap::Reservation& res = reservations[i];
    3097      279810 :     for (auto& chunk : res) {
    3098      226494 :       Address addr = chunk.start;
    3099    31360740 :       while (addr < chunk.end) {
    3100    15567122 :         HeapObject obj = HeapObject::FromAddress(addr);
    3101             :         // Objects can have any color because incremental marking can
    3102             :         // start in the middle of Heap::ReserveSpace().
    3103    15567122 :         if (marking_state->IsBlack(obj)) {
    3104    15567113 :           incremental_marking()->ProcessBlackAllocatedObject(obj);
    3105             :         }
    3106    15567138 :         addr += obj->Size();
    3107             :       }
    3108             :     }
    3109             :   }
    3110             : 
    3111             :   // Large object space doesn't use reservations, so it needs custom handling.
    3112       13349 :   for (HeapObject object : large_objects) {
    3113          20 :     incremental_marking()->ProcessBlackAllocatedObject(object);
    3114             :   }
    3115             : 
    3116             :   // Map space doesn't use reservations, so it needs custom handling.
    3117     3322839 :   for (Address addr : maps) {
    3118             :     incremental_marking()->ProcessBlackAllocatedObject(
    3119     3309510 :         HeapObject::FromAddress(addr));
    3120             :   }
    3121             : }
    3122             : 
    3123    42280206 : void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
    3124             :                                     const DisallowHeapAllocation&) {
    3125    42280206 :   if (incremental_marking()->IsMarking()) {
    3126     3416963 :     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
    3127     3461467 :     if (incremental_marking()->IsCompacting() &&
    3128       44494 :         MayContainRecordedSlots(object)) {
    3129             :       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
    3130        3007 :           object, size);
    3131             :     }
    3132             :   }
    3133             : #ifdef VERIFY_HEAP
    3134             :   if (FLAG_verify_heap) {
    3135             :     DCHECK(pending_layout_change_object_.is_null());
    3136             :     pending_layout_change_object_ = object;
    3137             :   }
    3138             : #endif
    3139    42280216 : }
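A minimal sketch of the calling convention this function expects, with hypothetical names (TransitionInPlace, object, new_map are placeholders, not actual V8 call sites): the notification must happen under a DisallowHeapAllocation scope and before the layout actually changes, so the marker can still visit the object with its old map and, when compacting, register it for invalidated-slot handling.

void TransitionInPlace(Heap* heap, HeapObject object, Map new_map) {
  DisallowHeapAllocation no_gc;
  // Notify while the object still has its old map and old size.
  heap->NotifyObjectLayoutChange(object, object->Size(), no_gc);
  // Only now switch the map and rewrite or shrink the fields in place.
  object->synchronized_set_map(new_map);
}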
    3140             : 
    3141             : #ifdef VERIFY_HEAP
    3142             : // Helper class for collecting slot addresses.
    3143             : class SlotCollectingVisitor final : public ObjectVisitor {
    3144             :  public:
    3145             :   void VisitPointers(HeapObject host, ObjectSlot start,
    3146             :                      ObjectSlot end) override {
    3147             :     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    3148             :   }
    3149             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3150             :                      MaybeObjectSlot end) final {
    3151             :     for (MaybeObjectSlot p = start; p < end; ++p) {
    3152             :       slots_.push_back(p);
    3153             :     }
    3154             :   }
    3155             : 
    3156             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
    3157             : 
    3158             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3159             :     UNREACHABLE();
    3160             :   }
    3161             : 
    3162             :   int number_of_slots() { return static_cast<int>(slots_.size()); }
    3163             : 
    3164             :   MaybeObjectSlot slot(int i) { return slots_[i]; }
    3165             : 
    3166             :  private:
    3167             :   std::vector<MaybeObjectSlot> slots_;
    3168             : };
    3169             : 
    3170             : void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
    3171             :   if (!FLAG_verify_heap) return;
    3172             : 
    3173             :   // Check that Heap::NotifyObjectLayoutChange was called for object transitions
    3174             :   // that are not safe for concurrent marking.
    3175             :   // If you see this check triggering for a freshly allocated object,
    3176             :   // use object->set_map_after_allocation() to initialize its map.
    3177             :   if (pending_layout_change_object_.is_null()) {
    3178             :     if (object->IsJSObject()) {
    3179             :       DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
    3180             :     } else {
    3181             :       // Check that the set of slots before and after the transition match.
    3182             :       SlotCollectingVisitor old_visitor;
    3183             :       object->IterateFast(&old_visitor);
    3184             :       MapWord old_map_word = object->map_word();
    3185             :       // Temporarily set the new map to iterate new slots.
    3186             :       object->set_map_word(MapWord::FromMap(new_map));
    3187             :       SlotCollectingVisitor new_visitor;
    3188             :       object->IterateFast(&new_visitor);
    3189             :       // Restore the old map.
    3190             :       object->set_map_word(old_map_word);
    3191             :       DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
    3192             :       for (int i = 0; i < new_visitor.number_of_slots(); i++) {
    3193             :         DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
    3194             :       }
    3195             :     }
    3196             :   } else {
    3197             :     DCHECK_EQ(pending_layout_change_object_, object);
    3198             :     pending_layout_change_object_ = HeapObject();
    3199             :   }
    3200             : }
    3201             : #endif
    3202             : 
    3203         469 : GCIdleTimeHeapState Heap::ComputeHeapState() {
    3204             :   GCIdleTimeHeapState heap_state;
    3205         469 :   heap_state.contexts_disposed = contexts_disposed_;
    3206             :   heap_state.contexts_disposal_rate =
    3207         469 :       tracer()->ContextDisposalRateInMilliseconds();
    3208         469 :   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
    3209         469 :   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
    3210         469 :   return heap_state;
    3211             : }
    3212             : 
    3213             : 
    3214         469 : bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
    3215             :                                  GCIdleTimeHeapState heap_state,
    3216             :                                  double deadline_in_ms) {
    3217             :   bool result = false;
    3218         469 :   switch (action) {
    3219             :     case GCIdleTimeAction::kDone:
    3220             :       result = true;
    3221         248 :       break;
    3222             :     case GCIdleTimeAction::kIncrementalStep: {
    3223             :       incremental_marking()->AdvanceWithDeadline(
    3224             :           deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
    3225          37 :           StepOrigin::kTask);
    3226             :       FinalizeIncrementalMarkingIfComplete(
    3227          37 :           GarbageCollectionReason::kFinalizeMarkingViaTask);
    3228             :       result = incremental_marking()->IsStopped();
    3229          37 :       break;
    3230             :     }
    3231             :     case GCIdleTimeAction::kFullGC: {
    3232             :       DCHECK_LT(0, contexts_disposed_);
    3233         184 :       HistogramTimerScope scope(isolate_->counters()->gc_context());
    3234         552 :       TRACE_EVENT0("v8", "V8.GCContext");
    3235             :       CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
    3236             :       break;
    3237             :     }
    3238             :   }
    3239             : 
    3240         469 :   return result;
    3241             : }
    3242             : 
    3243         469 : void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
    3244             :                                     GCIdleTimeHeapState heap_state,
    3245             :                                     double start_ms, double deadline_in_ms) {
    3246         469 :   double idle_time_in_ms = deadline_in_ms - start_ms;
    3247             :   double current_time = MonotonicallyIncreasingTimeInMs();
    3248         469 :   last_idle_notification_time_ = current_time;
    3249         469 :   double deadline_difference = deadline_in_ms - current_time;
    3250             : 
    3251         469 :   contexts_disposed_ = 0;
    3252             : 
    3253         469 :   if (FLAG_trace_idle_notification) {
    3254           0 :     isolate_->PrintWithTimestamp(
    3255             :         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
    3256             :         "ms, deadline usage %.2f ms [",
    3257             :         idle_time_in_ms, idle_time_in_ms - deadline_difference,
    3258           0 :         deadline_difference);
    3259           0 :     switch (action) {
    3260             :       case GCIdleTimeAction::kDone:
    3261           0 :         PrintF("done");
    3262           0 :         break;
    3263             :       case GCIdleTimeAction::kIncrementalStep:
    3264           0 :         PrintF("incremental step");
    3265           0 :         break;
    3266             :       case GCIdleTimeAction::kFullGC:
    3267           0 :         PrintF("full GC");
    3268           0 :         break;
    3269             :     }
    3270           0 :     PrintF("]");
    3271           0 :     if (FLAG_trace_idle_notification_verbose) {
    3272           0 :       PrintF("[");
    3273           0 :       heap_state.Print();
    3274           0 :       PrintF("]");
    3275             :     }
    3276           0 :     PrintF("\n");
    3277             :   }
    3278         469 : }
    3279             : 
    3280             : 
    3281    26120895 : double Heap::MonotonicallyIncreasingTimeInMs() {
    3282    28911165 :   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
    3283    28908153 :          static_cast<double>(base::Time::kMillisecondsPerSecond);
    3284             : }
    3285             : 
    3286             : 
    3287           0 : bool Heap::IdleNotification(int idle_time_in_ms) {
    3288           0 :   return IdleNotification(
    3289           0 :       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
    3290           0 :       (static_cast<double>(idle_time_in_ms) /
    3291           0 :        static_cast<double>(base::Time::kMillisecondsPerSecond)));
    3292             : }
    3293             : 
    3294             : 
    3295         469 : bool Heap::IdleNotification(double deadline_in_seconds) {
    3296         469 :   CHECK(HasBeenSetUp());
    3297             :   double deadline_in_ms =
    3298             :       deadline_in_seconds *
    3299         469 :       static_cast<double>(base::Time::kMillisecondsPerSecond);
    3300             :   HistogramTimerScope idle_notification_scope(
    3301         469 :       isolate_->counters()->gc_idle_notification());
    3302        1407 :   TRACE_EVENT0("v8", "V8.GCIdleNotification");
    3303             :   double start_ms = MonotonicallyIncreasingTimeInMs();
    3304         469 :   double idle_time_in_ms = deadline_in_ms - start_ms;
    3305             : 
    3306             :   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
    3307         469 :                              OldGenerationAllocationCounter());
    3308             : 
    3309         469 :   GCIdleTimeHeapState heap_state = ComputeHeapState();
    3310             : 
    3311             :   GCIdleTimeAction action =
    3312         469 :       gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
    3313             : 
    3314         469 :   bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
    3315             : 
    3316         469 :   IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
    3317         469 :   return result;
    3318             : }
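IdleNotification(double deadline_in_seconds) expects an absolute deadline on the platform's monotonic clock rather than a duration. A minimal embedder-side sketch, assuming the public v8::Isolate::IdleNotificationDeadline wrapper that forwards here (RunIdleWork and budget_ms are illustrative names):

#include "include/v8.h"
#include "include/v8-platform.h"

// Offer V8 an idle budget of |budget_ms| milliseconds, expressed as an
// absolute deadline in seconds on the platform's monotonic clock.
bool RunIdleWork(v8::Isolate* isolate, v8::Platform* platform,
                 double budget_ms) {
  double deadline_in_seconds =
      platform->MonotonicallyIncreasingTime() + budget_ms / 1000.0;
  // Returns true when no further idle work is useful (the kDone case above).
  return isolate->IdleNotificationDeadline(deadline_in_seconds);
}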
    3319             : 
    3320             : 
    3321           0 : bool Heap::RecentIdleNotificationHappened() {
    3322           0 :   return (last_idle_notification_time_ +
    3323             :           GCIdleTimeHandler::kMaxScheduledIdleTime) >
    3324           0 :          MonotonicallyIncreasingTimeInMs();
    3325             : }
    3326             : 
    3327             : class MemoryPressureInterruptTask : public CancelableTask {
    3328             :  public:
    3329             :   explicit MemoryPressureInterruptTask(Heap* heap)
    3330          11 :       : CancelableTask(heap->isolate()), heap_(heap) {}
    3331             : 
    3332          22 :   ~MemoryPressureInterruptTask() override = default;
    3333             : 
    3334             :  private:
    3335             :   // v8::internal::CancelableTask overrides.
    3336          11 :   void RunInternal() override { heap_->CheckMemoryPressure(); }
    3337             : 
    3338             :   Heap* heap_;
    3339             :   DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
    3340             : };
    3341             : 
    3342     1615460 : void Heap::CheckMemoryPressure() {
    3343     1615460 :   if (HighMemoryPressure()) {
    3344             :     // The optimizing compiler may be unnecessarily holding on to memory.
    3345        7530 :     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    3346             :   }
    3347             :   MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
    3348             :   // Reset the memory pressure level to avoid recursive GCs triggered by
    3349             :   // Reset the memory pressure level to avoid recursive GCs: the finalizers
    3350             :   // may call AdjustAmountOfExternalMemory, which in turn calls
    3351             :   // CheckMemoryPressure.
    3352     1615460 :   if (memory_pressure_level == MemoryPressureLevel::kCritical) {
    3353        7530 :     CollectGarbageOnMemoryPressure();
    3354     1607930 :   } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
    3355           0 :     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
    3356             :       StartIncrementalMarking(kReduceMemoryFootprintMask,
    3357             :                               GarbageCollectionReason::kMemoryPressure);
    3358             :     }
    3359             :   }
    3360     1615460 :   if (memory_reducer_) {
    3361             :     MemoryReducer::Event event;
    3362     1615460 :     event.type = MemoryReducer::kPossibleGarbage;
    3363     1615460 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    3364     1615460 :     memory_reducer_->NotifyPossibleGarbage(event);
    3365             :   }
    3366     1615460 : }
    3367             : 
    3368        7530 : void Heap::CollectGarbageOnMemoryPressure() {
    3369             :   const int kGarbageThresholdInBytes = 8 * MB;
    3370             :   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
    3371             :   // This constant is the maximum response time in the RAIL performance model.
    3372             :   const double kMaxMemoryPressurePauseMs = 100;
    3373             : 
    3374             :   double start = MonotonicallyIncreasingTimeInMs();
    3375             :   CollectAllGarbage(kReduceMemoryFootprintMask,
    3376             :                     GarbageCollectionReason::kMemoryPressure,
    3377             :                     kGCCallbackFlagCollectAllAvailableGarbage);
    3378        7530 :   EagerlyFreeExternalMemory();
    3379             :   double end = MonotonicallyIncreasingTimeInMs();
    3380             : 
    3381             :   // Estimate how much memory we can free.
    3382       22590 :   int64_t potential_garbage = (CommittedMemory() - SizeOfObjects()) +
    3383       15060 :                               isolate()->isolate_data()->external_memory_;
    3384             :   // If we can potentially free a large amount of memory, then start a GC
    3385             :   // right away instead of waiting for the memory reducer.
    3386       10198 :   if (potential_garbage >= kGarbageThresholdInBytes &&
    3387        2668 :       potential_garbage >=
    3388        2668 :           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
    3389             :     // If we spent less than half of the time budget, then perform a full GC.
    3390             :     // Otherwise, start incremental marking.
    3391        2668 :     if (end - start < kMaxMemoryPressurePauseMs / 2) {
    3392             :       CollectAllGarbage(kReduceMemoryFootprintMask,
    3393             :                         GarbageCollectionReason::kMemoryPressure,
    3394             :                         kGCCallbackFlagCollectAllAvailableGarbage);
    3395             :     } else {
    3396           0 :       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
    3397             :         StartIncrementalMarking(kReduceMemoryFootprintMask,
    3398             :                                 GarbageCollectionReason::kMemoryPressure);
    3399             :       }
    3400             :     }
    3401             :   }
    3402        7530 : }
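For example (illustrative numbers): with 100 MB committed, 60 MB of live objects and 5 MB of external memory, potential_garbage = (100 - 60) + 5 = 45 MB, which exceeds both kGarbageThresholdInBytes (8 MB) and 10% of committed memory (10 MB). A second full GC is then performed if the first one used less than 50 ms of the 100 ms RAIL budget; otherwise incremental marking is started (if it is enabled and not already running).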
    3403             : 
    3404        7540 : void Heap::MemoryPressureNotification(MemoryPressureLevel level,
    3405             :                                       bool is_isolate_locked) {
    3406             :   MemoryPressureLevel previous = memory_pressure_level_;
    3407             :   memory_pressure_level_ = level;
    3408       15080 :   if ((previous != MemoryPressureLevel::kCritical &&
    3409        7550 :        level == MemoryPressureLevel::kCritical) ||
    3410          20 :       (previous == MemoryPressureLevel::kNone &&
    3411          10 :        level == MemoryPressureLevel::kModerate)) {
    3412        7535 :     if (is_isolate_locked) {
    3413        7524 :       CheckMemoryPressure();
    3414             :     } else {
    3415             :       ExecutionAccess access(isolate());
    3416             :       isolate()->stack_guard()->RequestGC();
    3417          11 :       auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
    3418          11 :           reinterpret_cast<v8::Isolate*>(isolate()));
    3419          11 :       taskrunner->PostTask(
    3420          33 :           base::make_unique<MemoryPressureInterruptTask>(this));
    3421             :     }
    3422             :   }
    3423        7540 : }
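A minimal embedder-side sketch of driving this path, assuming the public v8::Isolate::MemoryPressureNotification wrapper (SignalCriticalMemoryPressure is an illustrative name):

#include "include/v8.h"

// Report critical memory pressure to V8. When called on the isolate's own
// thread the collection happens synchronously; otherwise a GC interrupt and
// a foreground MemoryPressureInterruptTask are scheduled, as above.
void SignalCriticalMemoryPressure(v8::Isolate* isolate) {
  isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
}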
    3424             : 
    3425        8786 : void Heap::EagerlyFreeExternalMemory() {
    3426       37463 :   for (Page* page : *old_space()) {
    3427       28677 :     if (!page->SweepingDone()) {
    3428             :       base::MutexGuard guard(page->mutex());
    3429        4954 :       if (!page->SweepingDone()) {
    3430             :         ArrayBufferTracker::FreeDead(
    3431        1529 :             page, mark_compact_collector()->non_atomic_marking_state());
    3432             :       }
    3433             :     }
    3434             :   }
    3435        8786 :   memory_allocator()->unmapper()->EnsureUnmappingCompleted();
    3436        8786 : }
    3437             : 
    3438        3426 : void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
    3439             :                                     void* data) {
    3440             :   const size_t kMaxCallbacks = 100;
    3441        3426 :   CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
    3442        3426 :   for (auto callback_data : near_heap_limit_callbacks_) {
    3443           0 :     CHECK_NE(callback_data.first, callback);
    3444             :   }
    3445        6852 :   near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
    3446        3426 : }
    3447             : 
    3448        3418 : void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
    3449             :                                        size_t heap_limit) {
    3450        3418 :   for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
    3451        3418 :     if (near_heap_limit_callbacks_[i].first == callback) {
    3452             :       near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
    3453        3418 :       if (heap_limit) {
    3454           5 :         RestoreHeapLimit(heap_limit);
    3455             :       }
    3456        3418 :       return;
    3457             :     }
    3458             :   }
    3459           0 :   UNREACHABLE();
    3460             : }
    3461             : 
    3462           4 : void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
    3463             :   initial_max_old_generation_size_threshold_ =
    3464           4 :       initial_max_old_generation_size_ * threshold_percent;
    3465           4 : }
    3466             : 
    3467          69 : bool Heap::InvokeNearHeapLimitCallback() {
    3468          69 :   if (near_heap_limit_callbacks_.size() > 0) {
    3469             :     HandleScope scope(isolate());
    3470             :     v8::NearHeapLimitCallback callback =
    3471          22 :         near_heap_limit_callbacks_.back().first;
    3472          22 :     void* data = near_heap_limit_callbacks_.back().second;
    3473          22 :     size_t heap_limit = callback(data, max_old_generation_size_,
    3474          22 :                                  initial_max_old_generation_size_);
    3475          22 :     if (heap_limit > max_old_generation_size_) {
    3476          22 :       max_old_generation_size_ = heap_limit;
    3477             :       return true;
    3478             :     }
    3479             :   }
    3480             :   return false;
    3481             : }
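The callbacks managed by AddNearHeapLimitCallback/InvokeNearHeapLimitCallback are usually installed through the public isolate API. A minimal sketch, assuming the v8::Isolate::AddNearHeapLimitCallback wrapper; the grow-by-half policy is only an example, not a recommendation:

#include "include/v8.h"

// Invoked on the last-resort path above. Returning a value larger than
// current_heap_limit raises the limit, so InvokeNearHeapLimitCallback()
// reports success; returning current_heap_limit leaves the heap on course
// for FatalProcessOutOfMemory.
size_t OnNearHeapLimit(void* /*data*/, size_t current_heap_limit,
                       size_t /*initial_heap_limit*/) {
  return current_heap_limit + current_heap_limit / 2;
}

void InstallNearHeapLimitHandler(v8::Isolate* isolate) {
  isolate->AddNearHeapLimitCallback(OnNearHeapLimit, /*data=*/nullptr);
}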
    3482             : 
    3483           0 : void Heap::CollectCodeStatistics() {
    3484           0 :   TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
    3485           0 :   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
    3486             :   // We do not look for code in new space or map space. If code
    3487             :   // somehow ends up in those spaces, we would miss it here.
    3488           0 :   CodeStatistics::CollectCodeStatistics(code_space_, isolate());
    3489           0 :   CodeStatistics::CollectCodeStatistics(old_space_, isolate());
    3490           0 :   CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
    3491           0 : }
    3492             : 
    3493             : #ifdef DEBUG
    3494             : 
    3495             : void Heap::Print() {
    3496             :   if (!HasBeenSetUp()) return;
    3497             :   isolate()->PrintStack(stdout);
    3498             : 
    3499             :   for (SpaceIterator it(this); it.has_next();) {
    3500             :     it.next()->Print();
    3501             :   }
    3502             : }
    3503             : 
    3504             : 
    3505             : void Heap::ReportCodeStatistics(const char* title) {
    3506             :   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
    3507             :   CollectCodeStatistics();
    3508             :   CodeStatistics::ReportCodeStatistics(isolate());
    3509             : }
    3510             : 
    3511             : #endif  // DEBUG
    3512             : 
    3513       94913 : const char* Heap::GarbageCollectionReasonToString(
    3514             :     GarbageCollectionReason gc_reason) {
    3515       94913 :   switch (gc_reason) {
    3516             :     case GarbageCollectionReason::kAllocationFailure:
    3517             :       return "allocation failure";
    3518             :     case GarbageCollectionReason::kAllocationLimit:
    3519           0 :       return "allocation limit";
    3520             :     case GarbageCollectionReason::kContextDisposal:
    3521         184 :       return "context disposal";
    3522             :     case GarbageCollectionReason::kCountersExtension:
    3523           0 :       return "counters extension";
    3524             :     case GarbageCollectionReason::kDebugger:
    3525       14369 :       return "debugger";
    3526             :     case GarbageCollectionReason::kDeserializer:
    3527           3 :       return "deserialize";
    3528             :     case GarbageCollectionReason::kExternalMemoryPressure:
    3529         836 :       return "external memory pressure";
    3530             :     case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
    3531        5075 :       return "finalize incremental marking via stack guard";
    3532             :     case GarbageCollectionReason::kFinalizeMarkingViaTask:
    3533       13798 :       return "finalize incremental marking via task";
    3534             :     case GarbageCollectionReason::kFullHashtable:
    3535           0 :       return "full hash-table";
    3536             :     case GarbageCollectionReason::kHeapProfiler:
    3537        1140 :       return "heap profiler";
    3538             :     case GarbageCollectionReason::kIdleTask:
    3539        1341 :       return "idle task";
    3540             :     case GarbageCollectionReason::kLastResort:
    3541          10 :       return "last resort";
    3542             :     case GarbageCollectionReason::kLowMemoryNotification:
    3543        1062 :       return "low memory notification";
    3544             :     case GarbageCollectionReason::kMakeHeapIterable:
    3545           0 :       return "make heap iterable";
    3546             :     case GarbageCollectionReason::kMemoryPressure:
    3547       10198 :       return "memory pressure";
    3548             :     case GarbageCollectionReason::kMemoryReducer:
    3549           0 :       return "memory reducer";
    3550             :     case GarbageCollectionReason::kRuntime:
    3551         336 :       return "runtime";
    3552             :     case GarbageCollectionReason::kSamplingProfiler:
    3553          20 :       return "sampling profiler";
    3554             :     case GarbageCollectionReason::kSnapshotCreator:
    3555         392 :       return "snapshot creator";
    3556             :     case GarbageCollectionReason::kTesting:
    3557       28350 :       return "testing";
    3558             :     case GarbageCollectionReason::kExternalFinalize:
    3559           5 :       return "external finalize";
    3560             :     case GarbageCollectionReason::kUnknown:
    3561           5 :       return "unknown";
    3562             :   }
    3563           0 :   UNREACHABLE();
    3564             : }
    3565             : 
    3566     1934217 : bool Heap::Contains(HeapObject value) {
    3567             :   // Check RO_SPACE first because IsOutsideAllocatedSpace cannot account for a
    3568             :   // shared RO_SPACE.
    3569             :   // TODO(goszczycki): Exclude read-only space. Use ReadOnlyHeap::Contains where
    3570             :   // appropriate.
    3571     3868434 :   if (read_only_space_ != nullptr && read_only_space_->Contains(value)) {
    3572             :     return true;
    3573             :   }
    3574     1934217 :   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    3575             :     return false;
    3576             :   }
    3577     3868434 :   return HasBeenSetUp() &&
    3578     3837735 :          (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
    3579          20 :           code_space_->Contains(value) || map_space_->Contains(value) ||
    3580           5 :           lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
    3581           0 :           new_lo_space_->Contains(value));
    3582             : }
    3583             : 
    3584          70 : bool Heap::InSpace(HeapObject value, AllocationSpace space) {
    3585          70 :   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    3586             :     return false;
    3587             :   }
    3588          70 :   if (!HasBeenSetUp()) return false;
    3589             : 
    3590          70 :   switch (space) {
    3591             :     case NEW_SPACE:
    3592          15 :       return new_space_->ToSpaceContains(value);
    3593             :     case OLD_SPACE:
    3594          30 :       return old_space_->Contains(value);
    3595             :     case CODE_SPACE:
    3596           0 :       return code_space_->Contains(value);
    3597             :     case MAP_SPACE:
    3598           0 :       return map_space_->Contains(value);
    3599             :     case LO_SPACE:
    3600          30 :       return lo_space_->Contains(value);
    3601             :     case CODE_LO_SPACE:
    3602          10 :       return code_lo_space_->Contains(value);
    3603             :     case NEW_LO_SPACE:
    3604           0 :       return new_lo_space_->Contains(value);
    3605             :     case RO_SPACE:
    3606           0 :       return read_only_space_->Contains(value);
    3607             :   }
    3608           0 :   UNREACHABLE();
    3609             : }
    3610             : 
    3611           0 : bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
    3612           0 :   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
    3613             :     return false;
    3614             :   }
    3615           0 :   if (!HasBeenSetUp()) return false;
    3616             : 
    3617           0 :   switch (space) {
    3618             :     case NEW_SPACE:
    3619           0 :       return new_space_->ToSpaceContainsSlow(addr);
    3620             :     case OLD_SPACE:
    3621           0 :       return old_space_->ContainsSlow(addr);
    3622             :     case CODE_SPACE:
    3623           0 :       return code_space_->ContainsSlow(addr);
    3624             :     case MAP_SPACE:
    3625           0 :       return map_space_->ContainsSlow(addr);
    3626             :     case LO_SPACE:
    3627           0 :       return lo_space_->ContainsSlow(addr);
    3628             :     case CODE_LO_SPACE:
    3629           0 :       return code_lo_space_->ContainsSlow(addr);
    3630             :     case NEW_LO_SPACE:
    3631           0 :       return new_lo_space_->ContainsSlow(addr);
    3632             :     case RO_SPACE:
    3633           0 :       return read_only_space_->ContainsSlow(addr);
    3634             :   }
    3635           0 :   UNREACHABLE();
    3636             : }
    3637             : 
    3638          40 : bool Heap::IsValidAllocationSpace(AllocationSpace space) {
    3639          40 :   switch (space) {
    3640             :     case NEW_SPACE:
    3641             :     case OLD_SPACE:
    3642             :     case CODE_SPACE:
    3643             :     case MAP_SPACE:
    3644             :     case LO_SPACE:
    3645             :     case NEW_LO_SPACE:
    3646             :     case CODE_LO_SPACE:
    3647             :     case RO_SPACE:
    3648             :       return true;
    3649             :     default:
    3650           0 :       return false;
    3651             :   }
    3652             : }
    3653             : 
    3654             : #ifdef VERIFY_HEAP
    3655             : class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
    3656             :  public:
    3657             :   explicit VerifyReadOnlyPointersVisitor(Heap* heap)
    3658             :       : VerifyPointersVisitor(heap) {}
    3659             : 
    3660             :  protected:
    3661             :   void VerifyPointers(HeapObject host, MaybeObjectSlot start,
    3662             :                       MaybeObjectSlot end) override {
    3663             :     if (!host.is_null()) {
    3664             :       CHECK(ReadOnlyHeap::Contains(host->map()));
    3665             :     }
    3666             :     VerifyPointersVisitor::VerifyPointers(host, start, end);
    3667             : 
    3668             :     for (MaybeObjectSlot current = start; current < end; ++current) {
    3669             :       HeapObject heap_object;
    3670             :       if ((*current)->GetHeapObject(&heap_object)) {
    3671             :         CHECK(ReadOnlyHeap::Contains(heap_object));
    3672             :       }
    3673             :     }
    3674             :   }
    3675             : };
    3676             : 
    3677             : void Heap::Verify() {
    3678             :   CHECK(HasBeenSetUp());
    3679             :   HandleScope scope(isolate());
    3680             : 
    3681             :   // We have to wait here for the sweeper threads to have an iterable heap.
    3682             :   mark_compact_collector()->EnsureSweepingCompleted();
    3683             : 
    3684             :   VerifyPointersVisitor visitor(this);
    3685             :   IterateRoots(&visitor, VISIT_ONLY_STRONG);
    3686             : 
    3687             :   if (!isolate()->context().is_null() &&
    3688             :       !isolate()->normalized_map_cache()->IsUndefined(isolate())) {
    3689             :     NormalizedMapCache::cast(*isolate()->normalized_map_cache())
    3690             :         ->NormalizedMapCacheVerify(isolate());
    3691             :   }
    3692             : 
    3693             :   VerifySmisVisitor smis_visitor;
    3694             :   IterateSmiRoots(&smis_visitor);
    3695             : 
    3696             :   new_space_->Verify(isolate());
    3697             : 
    3698             :   old_space_->Verify(isolate(), &visitor);
    3699             :   map_space_->Verify(isolate(), &visitor);
    3700             : 
    3701             :   VerifyPointersVisitor no_dirty_regions_visitor(this);
    3702             :   code_space_->Verify(isolate(), &no_dirty_regions_visitor);
    3703             : 
    3704             :   lo_space_->Verify(isolate());
    3705             :   code_lo_space_->Verify(isolate());
    3706             :   new_lo_space_->Verify(isolate());
    3707             : 
    3708             :   VerifyReadOnlyPointersVisitor read_only_visitor(this);
    3709             :   read_only_space_->Verify(isolate(), &read_only_visitor);
    3710             : }
    3711             : 
    3712             : class SlotVerifyingVisitor : public ObjectVisitor {
    3713             :  public:
    3714             :   SlotVerifyingVisitor(std::set<Address>* untyped,
    3715             :                        std::set<std::pair<SlotType, Address> >* typed)
    3716             :       : untyped_(untyped), typed_(typed) {}
    3717             : 
    3718             :   virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
    3719             : 
    3720             :   void VisitPointers(HeapObject host, ObjectSlot start,
    3721             :                      ObjectSlot end) override {
    3722             : #ifdef DEBUG
    3723             :     for (ObjectSlot slot = start; slot < end; ++slot) {
    3724             :       DCHECK(!HasWeakHeapObjectTag(*slot));
    3725             :     }
    3726             : #endif  // DEBUG
    3727             :     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    3728             :   }
    3729             : 
    3730             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3731             :                      MaybeObjectSlot end) final {
    3732             :     for (MaybeObjectSlot slot = start; slot < end; ++slot) {
    3733             :       if (ShouldHaveBeenRecorded(host, *slot)) {
    3734             :         CHECK_GT(untyped_->count(slot.address()), 0);
    3735             :       }
    3736             :     }
    3737             :   }
    3738             : 
    3739             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    3740             :     Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    3741             :     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
    3742             :       CHECK(
    3743             :           InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
    3744             :           (rinfo->IsInConstantPool() &&
    3745             :            InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
    3746             :     }
    3747             :   }
    3748             : 
    3749             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3750             :     Object target = rinfo->target_object();
    3751             :     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
    3752             :       CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
    3753             :             (rinfo->IsInConstantPool() &&
    3754             :              InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
    3755             :     }
    3756             :   }
    3757             : 
    3758             :  private:
    3759             :   bool InTypedSet(SlotType type, Address slot) {
    3760             :     return typed_->count(std::make_pair(type, slot)) > 0;
    3761             :   }
    3762             :   std::set<Address>* untyped_;
    3763             :   std::set<std::pair<SlotType, Address> >* typed_;
    3764             : };
    3765             : 
    3766             : class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
    3767             :  public:
    3768             :   OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
    3769             :                                std::set<std::pair<SlotType, Address>>* typed)
    3770             :       : SlotVerifyingVisitor(untyped, typed) {}
    3771             : 
    3772             :   bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
    3773             :     DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InYoungGeneration(target),
    3774             :                    Heap::InToPage(target));
    3775             :     return target->IsStrongOrWeak() && Heap::InYoungGeneration(target) &&
    3776             :            !Heap::InYoungGeneration(host);
    3777             :   }
    3778             : };
    3779             : 
    3780             : template <RememberedSetType direction>
    3781             : void CollectSlots(MemoryChunk* chunk, Address start, Address end,
    3782             :                   std::set<Address>* untyped,
    3783             :                   std::set<std::pair<SlotType, Address> >* typed) {
    3784             :   RememberedSet<direction>::Iterate(
    3785             :       chunk,
    3786             :       [start, end, untyped](MaybeObjectSlot slot) {
    3787             :         if (start <= slot.address() && slot.address() < end) {
    3788             :           untyped->insert(slot.address());
    3789             :         }
    3790             :         return KEEP_SLOT;
    3791             :       },
    3792             :       SlotSet::PREFREE_EMPTY_BUCKETS);
    3793             :   RememberedSet<direction>::IterateTyped(
    3794             :       chunk, [=](SlotType type, Address slot) {
    3795             :         if (start <= slot && slot < end) {
    3796             :           typed->insert(std::make_pair(type, slot));
    3797             :         }
    3798             :         return KEEP_SLOT;
    3799             :       });
    3800             : }
    3801             : 
    3802             : void Heap::VerifyRememberedSetFor(HeapObject object) {
    3803             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    3804             :   DCHECK_IMPLIES(chunk->mutex() == nullptr, ReadOnlyHeap::Contains(object));
    3805             :   // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
    3806             :   base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
    3807             :       chunk->mutex());
    3808             :   Address start = object->address();
    3809             :   Address end = start + object->Size();
    3810             :   std::set<Address> old_to_new;
    3811             :   std::set<std::pair<SlotType, Address> > typed_old_to_new;
    3812             :   if (!InYoungGeneration(object)) {
    3813             :     store_buffer()->MoveAllEntriesToRememberedSet();
    3814             :     CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
    3815             :     OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new);
    3816             :     object->IterateBody(&visitor);
    3817             :   }
    3818             :   // TODO(ulan): Add old to old slot set verification once all weak objects
    3819             :   // have their own instance types and slots are recorded for all weak fields.
    3820             : }
    3821             : #endif
    3822             : 
    3823             : #ifdef DEBUG
    3824             : void Heap::VerifyCountersAfterSweeping() {
    3825             :   PagedSpaces spaces(this);
    3826             :   for (PagedSpace* space = spaces.next(); space != nullptr;
    3827             :        space = spaces.next()) {
    3828             :     space->VerifyCountersAfterSweeping();
    3829             :   }
    3830             : }
    3831             : 
    3832             : void Heap::VerifyCountersBeforeConcurrentSweeping() {
    3833             :   PagedSpaces spaces(this);
    3834             :   for (PagedSpace* space = spaces.next(); space != nullptr;
    3835             :        space = spaces.next()) {
    3836             :     space->VerifyCountersBeforeConcurrentSweeping();
    3837             :   }
    3838             : }
    3839             : #endif
    3840             : 
    3841           0 : void Heap::ZapFromSpace() {
    3842           0 :   if (!new_space_->IsFromSpaceCommitted()) return;
    3843           0 :   for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
    3844           0 :     memory_allocator()->ZapBlock(page->area_start(),
    3845           0 :                                  page->HighWaterMark() - page->area_start(),
    3846           0 :                                  ZapValue());
    3847             :   }
    3848             : }
    3849             : 
    3850     1210727 : void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
    3851             : #ifdef DEBUG
    3852             :   DCHECK(IsAligned(start_address, kIntSize));
    3853             :   for (int i = 0; i < size_in_bytes / kIntSize; i++) {
    3854             :     Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
    3855             :   }
    3856             : #endif
    3857     1210727 : }
    3858             : 
    3859             : // TODO(ishell): move builtin accessors out from Heap.
    3860   146932885 : Code Heap::builtin(int index) {
    3861             :   DCHECK(Builtins::IsBuiltinId(index));
    3862   146932885 :   return Code::cast(Object(isolate()->builtins_table()[index]));
    3863             : }
    3864             : 
    3865    51744507 : Address Heap::builtin_address(int index) {
    3866             :   DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
    3867   439445527 :   return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
    3868             : }
    3869             : 
    3870      254520 : void Heap::set_builtin(int index, Code builtin) {
    3871             :   DCHECK(Builtins::IsBuiltinId(index));
    3872             :   DCHECK(Internals::HasHeapObjectTag(builtin.ptr()));
    3873             :   // The given builtin may be completely uninitialized thus we cannot check its
    3874             :   // type here.
    3875      509040 :   isolate()->builtins_table()[index] = builtin.ptr();
    3876      254520 : }
    3877             : 
    3878       95326 : void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
    3879       96616 :   IterateStrongRoots(v, mode);
    3880       96616 :   IterateWeakRoots(v, mode);
    3881       95326 : }
    3882             : 
    3883      158355 : void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
    3884      316710 :   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
    3885      316710 :                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
    3886             :                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
    3887      158355 :   v->VisitRootPointer(Root::kStringTable, nullptr,
    3888      316710 :                       FullObjectSlot(&roots_table()[RootIndex::kStringTable]));
    3889      158355 :   v->Synchronize(VisitorSynchronization::kStringTable);
    3890      158355 :   if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
    3891             :       mode != VISIT_FOR_SERIALIZATION) {
    3892             :     // Scavenge collections have special processing for this.
    3893             :     // Do not visit for serialization, since the external string table will
    3894             :     // be populated from scratch upon deserialization.
    3895        1688 :     external_string_table_.IterateAll(v);
    3896             :   }
    3897      158355 :   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
    3898      158355 : }
    3899             : 
    3900       61739 : void Heap::IterateSmiRoots(RootVisitor* v) {
    3901             :   // Acquire execution access since we are going to read stack limit values.
    3902             :   ExecutionAccess access(isolate());
    3903             :   v->VisitRootPointers(Root::kSmiRootList, nullptr,
    3904             :                        roots_table().smi_roots_begin(),
    3905      123478 :                        roots_table().smi_roots_end());
    3906       61739 :   v->Synchronize(VisitorSynchronization::kSmiRootList);
    3907       61739 : }
    3908             : 
    3909             : // We cannot avoid stale handles to left-trimmed objects; we can only make
    3910             : // sure that all handles still needed are updated. Filter out a stale pointer
    3911             : // and clear the slot to allow post-processing of handles (needed because
    3912             : // the sweeper might actually free the underlying page).
    3913      276888 : class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
    3914             :  public:
    3915      276888 :   explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
    3916             :     USE(heap_);
    3917             :   }
    3918             : 
    3919           0 :   void VisitRootPointer(Root root, const char* description,
    3920             :                         FullObjectSlot p) override {
    3921           0 :     FixHandle(p);
    3922           0 :   }
    3923             : 
    3924      638830 :   void VisitRootPointers(Root root, const char* description,
    3925             :                          FullObjectSlot start, FullObjectSlot end) override {
    3926    93729049 :     for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
    3927      638830 :   }
    3928             : 
    3929             :  private:
    3930    92451389 :   inline void FixHandle(FullObjectSlot p) {
    3931    92451389 :     if (!(*p)->IsHeapObject()) return;
    3932             :     HeapObject current = HeapObject::cast(*p);
    3933             :     const MapWord map_word = current->map_word();
    3934   155008591 :     if (!map_word.IsForwardingAddress() && current->IsFiller()) {
    3935             : #ifdef DEBUG
    3936             :       // We need to find a FixedArrayBase map after walking the fillers.
    3937             :       while (current->IsFiller()) {
    3938             :         Address next = current->ptr();
    3939             :         if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
    3940             :           next += kTaggedSize;
    3941             :         } else if (current->map() ==
    3942             :                    ReadOnlyRoots(heap_).two_pointer_filler_map()) {
    3943             :           next += 2 * kTaggedSize;
    3944             :         } else {
    3945             :           next += current->Size();
    3946             :         }
    3947             :         current = HeapObject::cast(Object(next));
    3948             :       }
    3949             :       DCHECK(current->IsFixedArrayBase());
    3950             : #endif  // DEBUG
    3951             :       p.store(Smi::kZero);
    3952             :     }
    3953             :   }
    3954             : 
    3955             :   Heap* heap_;
    3956             : };
    3957             : 
    3958      276888 : void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
    3959      553776 :   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
    3960      553776 :                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
    3961             :                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
    3962             :   v->VisitRootPointers(Root::kStrongRootList, nullptr,
    3963             :                        roots_table().strong_roots_begin(),
    3964      553776 :                        roots_table().strong_roots_end());
    3965      276888 :   v->Synchronize(VisitorSynchronization::kStrongRootList);
    3966             : 
    3967      276888 :   isolate_->bootstrapper()->Iterate(v);
    3968      276888 :   v->Synchronize(VisitorSynchronization::kBootstrapper);
    3969      276888 :   isolate_->Iterate(v);
    3970      276888 :   v->Synchronize(VisitorSynchronization::kTop);
    3971      276888 :   Relocatable::Iterate(isolate_, v);
    3972      276888 :   v->Synchronize(VisitorSynchronization::kRelocatable);
    3973      276888 :   isolate_->debug()->Iterate(v);
    3974      276888 :   v->Synchronize(VisitorSynchronization::kDebug);
    3975             : 
    3976      276888 :   isolate_->compilation_cache()->Iterate(v);
    3977      276888 :   v->Synchronize(VisitorSynchronization::kCompilationCache);
    3978             : 
    3979             :   // Iterate over local handles in handle scopes.
    3980             :   FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
    3981      276888 :   isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
    3982      276888 :   isolate_->handle_scope_implementer()->Iterate(v);
    3983      276888 :   isolate_->IterateDeferredHandles(v);
    3984      276888 :   v->Synchronize(VisitorSynchronization::kHandleScope);
    3985             : 
    3986             :   // Iterate over the builtin code objects and code stubs in the
    3987             :   // heap. Note that it is not necessary to iterate over code objects
    3988             :   // on scavenge collections.
    3989      276888 :   if (!isMinorGC) {
    3990      255915 :     IterateBuiltins(v);
    3991      255915 :     v->Synchronize(VisitorSynchronization::kBuiltins);
    3992             : 
    3993             :     // The dispatch table is set up directly from the builtins using
    3994             :     // InitializeDispatchTable so there is no need to iterate to create it.
    3995      255915 :     if (mode != VISIT_FOR_SERIALIZATION) {
    3996             :       // Currently we iterate the dispatch table to update pointers to possibly
    3997             :       // moved Code objects for bytecode handlers.
    3998             :       // TODO(v8:6666): Remove iteration once builtins are embedded (and thus
    3999             :       // immovable) in every build configuration.
    4000      194176 :       isolate_->interpreter()->IterateDispatchTable(v);
    4001      194176 :       v->Synchronize(VisitorSynchronization::kDispatchTable);
    4002             :     }
    4003             :   }
    4004             : 
    4005             :   // Iterate over global handles.
    4006      276888 :   switch (mode) {
    4007             :     case VISIT_FOR_SERIALIZATION:
    4008             :       // Global handles are not iterated by the serializer. Values referenced by
    4009             :       // global handles need to be added manually.
    4010             :       break;
    4011             :     case VISIT_ONLY_STRONG:
    4012      118931 :       isolate_->global_handles()->IterateStrongRoots(v);
    4013      118931 :       break;
    4014             :     case VISIT_ALL_IN_SCAVENGE:
    4015             :     case VISIT_ALL_IN_MINOR_MC_MARK:
    4016       20973 :       isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
    4017       20973 :       break;
    4018             :     case VISIT_ALL_IN_MINOR_MC_UPDATE:
    4019           0 :       isolate_->global_handles()->IterateAllYoungRoots(v);
    4020           0 :       break;
    4021             :     case VISIT_ALL_IN_SWEEP_NEWSPACE:
    4022             :     case VISIT_ALL:
    4023       75245 :       isolate_->global_handles()->IterateAllRoots(v);
    4024       75245 :       break;
    4025             :   }
    4026      276888 :   v->Synchronize(VisitorSynchronization::kGlobalHandles);
    4027             : 
    4028             :   // Iterate over eternal handles. Eternal handles are not iterated by the
    4029             :   // serializer. Values referenced by eternal handles need to be added manually.
    4030      276888 :   if (mode != VISIT_FOR_SERIALIZATION) {
    4031      215149 :     if (isMinorGC) {
    4032       20973 :       isolate_->eternal_handles()->IterateYoungRoots(v);
    4033             :     } else {
    4034      194176 :       isolate_->eternal_handles()->IterateAllRoots(v);
    4035             :     }
    4036             :   }
    4037      276888 :   v->Synchronize(VisitorSynchronization::kEternalHandles);
    4038             : 
    4039             :   // Iterate over pointers being held by inactive threads.
    4040      276888 :   isolate_->thread_manager()->Iterate(v);
    4041      276888 :   v->Synchronize(VisitorSynchronization::kThreadManager);
    4042             : 
    4043             :   // Iterate over other strong roots (currently only identity maps).
    4044      554116 :   for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
    4045      277228 :     v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
    4046             :   }
    4047      276888 :   v->Synchronize(VisitorSynchronization::kStrongRoots);
    4048             : 
    4049             :   // Iterate over pending Microtasks stored in MicrotaskQueues.
    4050      276888 :   MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
    4051      276888 :   if (default_microtask_queue) {
    4052             :     MicrotaskQueue* microtask_queue = default_microtask_queue;
    4053             :     do {
    4054      276890 :       microtask_queue->IterateMicrotasks(v);
    4055             :       microtask_queue = microtask_queue->next();
    4056      276890 :     } while (microtask_queue != default_microtask_queue);
    4057             :   }
    4058             : 
    4059             :   // Iterate over the partial snapshot cache unless serializing or
    4060             :   // deserializing.
    4061      276888 :   if (mode != VISIT_FOR_SERIALIZATION) {
    4062      215149 :     SerializerDeserializer::Iterate(isolate_, v);
    4063      215149 :     v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
    4064             :   }
    4065      276888 : }
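
// Editor's note (added): a condensed map of the strong-root walk above. In
// order, it visits the strong root list, the bootstrapper, the isolate's top
// state, relocatables, the debugger, the compilation cache, handle scopes
// (with the left-trim fix-up visitor) and deferred handles, builtins plus the
// interpreter dispatch table on full GCs, global handles according to the
// visit mode, eternal handles, inactive threads, registered strong-root
// ranges, microtask queues, and, unless serializing, the partial snapshot
// cache.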
    4066             : 
    4067         398 : void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
    4068         398 :   isolate_->global_handles()->IterateWeakRoots(v);
    4069         398 : }
    4070             : 
    4071      256014 : void Heap::IterateBuiltins(RootVisitor* v) {
    4072   775657856 :   for (int i = 0; i < Builtins::builtin_count; i++) {
    4073   775401941 :     v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
    4074   775401895 :                         FullObjectSlot(builtin_address(i)));
    4075             :   }
    4076             : #ifdef V8_EMBEDDED_BUILTINS
    4077             :   // The entry table does not need to be updated if all builtins are embedded.
    4078             :   STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
    4079             : #else
    4080             :   // If builtins are not embedded, they may move and thus the entry table must
    4081             :   // be updated.
    4082             :   // TODO(v8:6666): Remove once builtins are embedded unconditionally.
    4083             :   Builtins::UpdateBuiltinEntryTable(isolate());
    4084             : #endif  // V8_EMBEDDED_BUILTINS
    4085      255915 : }
    4086             : 
    4087             : // TODO(1236194): Since the heap size is configurable on the command line
    4088             : // and through the API, we should gracefully handle the case that the heap
    4089             : // size is not big enough to fit all the initial objects.
    4090       61531 : void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
    4091             :                          size_t max_old_generation_size_in_mb,
    4092             :                          size_t code_range_size_in_mb) {
    4093             :   // Overwrite default configuration.
    4094       61531 :   if (max_semi_space_size_in_kb != 0) {
    4095             :     max_semi_space_size_ =
    4096       59626 :         RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
    4097             :   }
    4098       61531 :   if (max_old_generation_size_in_mb != 0) {
    4099       29817 :     max_old_generation_size_ = max_old_generation_size_in_mb * MB;
    4100             :   }
    4101             : 
    4102             :   // If max space size flags are specified overwrite the configuration.
    4103       61531 :   if (FLAG_max_semi_space_size > 0) {
    4104         186 :     max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
    4105             :   }
    4106       61531 :   if (FLAG_max_old_space_size > 0) {
    4107             :     max_old_generation_size_ =
    4108          39 :         static_cast<size_t>(FLAG_max_old_space_size) * MB;
    4109             :   }
    4110             : 
    4111             :   if (Page::kPageSize > MB) {
    4112             :     max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
    4113             :     max_old_generation_size_ =
    4114             :         RoundUp<Page::kPageSize>(max_old_generation_size_);
    4115             :   }
    4116             : 
    4117       61531 :   if (FLAG_stress_compaction) {
    4118             :     // This will cause more frequent GCs when stressing.
    4119          96 :     max_semi_space_size_ = MB;
    4120             :   }
    4121             : 
    4122             :   // The new space size must be a power of two to support single-bit testing
    4123             :   // for containment.
    4124       61531 :   max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
    4125      123063 :       static_cast<uint64_t>(max_semi_space_size_)));
    4126             : 
    4127       61532 :   if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
    4128             :     // Start with at least 1*MB semi-space on machines with a lot of memory.
    4129             :     initial_semispace_size_ =
    4130      122652 :         Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
    4131             :   }
    4132             : 
    4133       61532 :   if (FLAG_min_semi_space_size > 0) {
    4134             :     size_t initial_semispace_size =
    4135          35 :         static_cast<size_t>(FLAG_min_semi_space_size) * MB;
    4136          35 :     if (initial_semispace_size > max_semi_space_size_) {
    4137           5 :       initial_semispace_size_ = max_semi_space_size_;
    4138           5 :       if (FLAG_trace_gc) {
    4139           0 :         PrintIsolate(isolate_,
    4140             :                      "Min semi-space size cannot be more than the maximum "
    4141             :                      "semi-space size of %" PRIuS " MB\n",
    4142           0 :                      max_semi_space_size_ / MB);
    4143             :       }
    4144             :     } else {
    4145             :       initial_semispace_size_ =
    4146          30 :           RoundUp<Page::kPageSize>(initial_semispace_size);
    4147             :     }
    4148             :   }
    4149             : 
    4150      123066 :   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
    4151             : 
    4152       61533 :   if (FLAG_semi_space_growth_factor < 2) {
    4153           0 :     FLAG_semi_space_growth_factor = 2;
    4154             :   }
    4155             : 
    4156             :   // The old generation is paged and needs at least one page for each space.
    4157             :   int paged_space_count =
    4158             :       LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
    4159             :   initial_max_old_generation_size_ = max_old_generation_size_ =
    4160       61533 :       Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
    4161       61533 :           max_old_generation_size_);
    4162             : 
    4163       61533 :   if (FLAG_initial_old_space_size > 0) {
    4164           0 :     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
    4165             :   } else {
    4166             :     initial_old_generation_size_ =
    4167       61533 :         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
    4168             :   }
    4169       61533 :   old_generation_allocation_limit_ = initial_old_generation_size_;
    4170             : 
    4171             :   // We rely on being able to allocate new arrays in paged spaces.
    4172             :   DCHECK(kMaxRegularHeapObjectSize >=
    4173             :          (JSArray::kSize +
    4174             :           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
    4175             :           AllocationMemento::kSize));
    4176             : 
    4177       61533 :   code_range_size_ = code_range_size_in_mb * MB;
    4178             : 
    4179       61533 :   configured_ = true;
    4180       61533 : }
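
// Editor's note (added): a minimal, standalone sketch of the semi-space size
// normalization performed above, assuming a 512 KB page; the helpers below
// are illustrative only and are not part of Heap or of the V8 API.
#include <cstddef>
#include <cstdint>

static uint64_t RoundUpToPowerOfTwo64Sketch(uint64_t value) {
  uint64_t result = 1;
  while (result < value) result <<= 1;  // Double until result >= value.
  return result;
}

static size_t NormalizeSemiSpaceSizeSketch(size_t requested_kb,
                                           size_t page_size) {
  size_t bytes = requested_kb * 1024;
  // Round up to whole pages, then to a power of two, mirroring the
  // adjustments applied by ConfigureHeap above.
  bytes = ((bytes + page_size - 1) / page_size) * page_size;
  return static_cast<size_t>(RoundUpToPowerOfTwo64Sketch(bytes));
}
// For example, NormalizeSemiSpaceSizeSketch(768, 512 * 1024) yields 1 MB.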
    4181             : 
    4182             : 
    4183       94908 : void Heap::AddToRingBuffer(const char* string) {
    4184             :   size_t first_part =
    4185       94908 :       Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
    4186       94908 :   memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
    4187       94908 :   ring_buffer_end_ += first_part;
    4188       94908 :   if (first_part < strlen(string)) {
    4189       24442 :     ring_buffer_full_ = true;
    4190       24442 :     size_t second_part = strlen(string) - first_part;
    4191       24442 :     memcpy(trace_ring_buffer_, string + first_part, second_part);
    4192       24442 :     ring_buffer_end_ = second_part;
    4193             :   }
    4194       94908 : }
    4195             : 
    4196             : 
    4197          15 : void Heap::GetFromRingBuffer(char* buffer) {
    4198             :   size_t copied = 0;
    4199          15 :   if (ring_buffer_full_) {
    4200           0 :     copied = kTraceRingBufferSize - ring_buffer_end_;
    4201           0 :     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
    4202             :   }
    4203          15 :   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
    4204          15 : }
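
// Editor's note (added): a self-contained sketch of the wrap-around copy used
// by AddToRingBuffer/GetFromRingBuffer above, with a hypothetical 16-byte
// buffer standing in for Heap's trace_ring_buffer_ and kTraceRingBufferSize.
#include <algorithm>
#include <cstring>

struct TraceRingSketch {
  static constexpr size_t kSize = 16;
  char data[kSize] = {};
  size_t end = 0;
  bool full = false;

  void Add(const char* string) {
    // The real trace buffer is large enough for any single message; clamp
    // here so the tiny sketch buffer stays safe.
    size_t length = std::strlen(string);
    if (length > kSize) length = kSize;
    const size_t first_part = std::min(length, kSize - end);
    std::memcpy(data + end, string, first_part);
    end += first_part;
    if (first_part < length) {
      full = true;  // The oldest bytes are about to be overwritten.
      const size_t second_part = length - first_part;
      std::memcpy(data, string + first_part, second_part);
      end = second_part;
    }
  }
};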
    4205             : 
    4206       31712 : void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
    4207             : 
    4208          15 : void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
    4209          15 :   *stats->start_marker = HeapStats::kStartMarker;
    4210          15 :   *stats->end_marker = HeapStats::kEndMarker;
    4211          15 :   *stats->ro_space_size = read_only_space_->Size();
    4212          30 :   *stats->ro_space_capacity = read_only_space_->Capacity();
    4213          15 :   *stats->new_space_size = new_space_->Size();
    4214          30 :   *stats->new_space_capacity = new_space_->Capacity();
    4215          15 :   *stats->old_space_size = old_space_->SizeOfObjects();
    4216          30 :   *stats->old_space_capacity = old_space_->Capacity();
    4217          15 :   *stats->code_space_size = code_space_->SizeOfObjects();
    4218          30 :   *stats->code_space_capacity = code_space_->Capacity();
    4219          15 :   *stats->map_space_size = map_space_->SizeOfObjects();
    4220          30 :   *stats->map_space_capacity = map_space_->Capacity();
    4221          15 :   *stats->lo_space_size = lo_space_->Size();
    4222          15 :   *stats->code_lo_space_size = code_lo_space_->Size();
    4223          15 :   isolate_->global_handles()->RecordStats(stats);
    4224          30 :   *stats->memory_allocator_size = memory_allocator()->Size();
    4225          15 :   *stats->memory_allocator_capacity =
    4226          15 :       memory_allocator()->Size() + memory_allocator()->Available();
    4227          15 :   *stats->os_error = base::OS::GetLastError();
    4228          30 :   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
    4229          30 :   *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
    4230          15 :   if (take_snapshot) {
    4231           0 :     HeapIterator iterator(this);
    4232           0 :     for (HeapObject obj = iterator.next(); !obj.is_null();
    4233             :          obj = iterator.next()) {
    4234             :       InstanceType type = obj->map()->instance_type();
    4235             :       DCHECK(0 <= type && type <= LAST_TYPE);
    4236           0 :       stats->objects_per_type[type]++;
    4237           0 :       stats->size_per_type[type] += obj->Size();
    4238             :     }
    4239             :   }
    4240          15 :   if (stats->last_few_messages != nullptr)
    4241          15 :     GetFromRingBuffer(stats->last_few_messages);
    4242          15 :   if (stats->js_stacktrace != nullptr) {
    4243             :     FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
    4244             :     StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
    4245          15 :     if (gc_state() == Heap::NOT_IN_GC) {
    4246          15 :       isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
    4247             :     } else {
    4248           0 :       accumulator.Add("Cannot get stack trace in GC.");
    4249             :     }
    4250             :   }
    4251          15 : }
    4252             : 
    4253     1833970 : size_t Heap::OldGenerationSizeOfObjects() {
    4254             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
    4255             :   size_t total = 0;
    4256     9169853 :   for (PagedSpace* space = spaces.next(); space != nullptr;
    4257             :        space = spaces.next()) {
    4258     7335884 :     total += space->SizeOfObjects();
    4259             :   }
    4260     1833972 :   return total + lo_space_->SizeOfObjects();
    4261             : }
    4262             : 
    4263         610 : uint64_t Heap::PromotedExternalMemorySize() {
    4264             :   IsolateData* isolate_data = isolate()->isolate_data();
    4265      908460 :   if (isolate_data->external_memory_ <=
    4266      454230 :       isolate_data->external_memory_at_last_mark_compact_) {
    4267             :     return 0;
    4268             :   }
    4269             :   return static_cast<uint64_t>(
    4270       24127 :       isolate_data->external_memory_ -
    4271       24127 :       isolate_data->external_memory_at_last_mark_compact_);
    4272             : }
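
// Editor's note (added): a worked example of the computation above, with
// hypothetical values. If external_memory_ is 48 MB and
// external_memory_at_last_mark_compact_ is 40 MB, the promoted external
// memory is 8 MB; had the current value been at or below the mark-compact
// baseline, the function would return 0.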
    4273             : 
    4274        2075 : bool Heap::ShouldOptimizeForLoadTime() {
    4275           0 :   return isolate()->rail_mode() == PERFORMANCE_LOAD &&
    4276        2075 :          !AllocationLimitOvershotByLargeMargin() &&
    4277             :          MonotonicallyIncreasingTimeInMs() <
    4278        2075 :              isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
    4279             : }
    4280             : 
    4281             : // This predicate is called when an old generation space cannot allocate from
    4282             : // the free list and is about to add a new page. Returning false will cause a
    4283             : // major GC. It happens when the old generation allocation limit is reached and
    4284             : // - either we need to optimize for memory usage,
    4285             : // - or the incremental marking is not in progress and we cannot start it.
    4286      492460 : bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
    4287      492460 :   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
    4288             :   // We reached the old generation allocation limit.
    4289             : 
    4290         718 :   if (ShouldOptimizeForMemoryUsage()) return false;
    4291             : 
    4292         713 :   if (ShouldOptimizeForLoadTime()) return true;
    4293             : 
    4294         713 :   if (incremental_marking()->NeedsFinalization()) {
    4295         586 :     return !AllocationLimitOvershotByLargeMargin();
    4296             :   }
    4297             : 
    4298         166 :   if (incremental_marking()->IsStopped() &&
    4299          39 :       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
    4300             :     // We cannot start incremental marking.
    4301             :     return false;
    4302             :   }
    4303          88 :   return true;
    4304             : }
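
// Editor's note (added): in decision order, the predicate above (a) expands
// while allocation is forced or the old-generation limit still has headroom,
// (b) refuses when optimizing for memory usage, (c) expands during a
// load-time window, (d) once incremental marking needs finalization expands
// only if the limit is not overshot by a large margin, and (e) otherwise
// expands only if incremental marking is running or can still be started.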
    4305             : 
    4306       80777 : Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
    4307       80777 :   if (ShouldReduceMemory() || FLAG_stress_compaction) {
    4308             :     return Heap::HeapGrowingMode::kMinimal;
    4309             :   }
    4310             : 
    4311       66399 :   if (ShouldOptimizeForMemoryUsage()) {
    4312             :     return Heap::HeapGrowingMode::kConservative;
    4313             :   }
    4314             : 
    4315       66343 :   if (memory_reducer()->ShouldGrowHeapSlowly()) {
    4316             :     return Heap::HeapGrowingMode::kSlow;
    4317             :   }
    4318             : 
    4319       66329 :   return Heap::HeapGrowingMode::kDefault;
    4320             : }
    4321             : 
    4322             : // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
    4323             : // kNoLimit means that either incremental marking is disabled or it is too
    4324             : // early to start it.
    4325             : // kSoftLimit means that incremental marking should be started soon.
    4326             : // kHardLimit means that incremental marking should be started immediately.
    4327     1414024 : Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
    4328             :   // Code using an AlwaysAllocateScope assumes that the GC state does not
    4329             :   // change; that implies that no marking steps must be performed.
    4330     2313849 :   if (!incremental_marking()->CanBeActivated() || always_allocate()) {
    4331             :     // Incremental marking is disabled or it is too early to start.
    4332             :     return IncrementalMarkingLimit::kNoLimit;
    4333             :   }
    4334      898713 :   if (FLAG_stress_incremental_marking) {
    4335             :     return IncrementalMarkingLimit::kHardLimit;
    4336             :   }
    4337      871661 :   if (OldGenerationSizeOfObjects() <=
    4338             :       IncrementalMarking::kActivationThreshold) {
    4339             :     // The old generation is still too small to start incremental marking.
    4340             :     return IncrementalMarkingLimit::kNoLimit;
    4341             :   }
    4342       19914 :   if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
    4343             :       HighMemoryPressure()) {
    4344             :     // If there is high memory pressure or stress testing is enabled, then
    4345             :     // start marking immediately.
    4346             :     return IncrementalMarkingLimit::kHardLimit;
    4347             :   }
    4348             : 
    4349        9957 :   if (FLAG_stress_marking > 0) {
    4350             :     double gained_since_last_gc =
    4351           0 :         PromotedSinceLastGC() +
    4352           0 :         (isolate()->isolate_data()->external_memory_ -
    4353           0 :          isolate()->isolate_data()->external_memory_at_last_mark_compact_);
    4354             :     double size_before_gc =
    4355           0 :         OldGenerationObjectsAndPromotedExternalMemorySize() -
    4356           0 :         gained_since_last_gc;
    4357           0 :     double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
    4358           0 :     if (bytes_to_limit > 0) {
    4359           0 :       double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
    4360             : 
    4361           0 :       if (FLAG_trace_stress_marking) {
    4362             :         isolate()->PrintWithTimestamp(
    4363             :             "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
    4364           0 :             current_percent);
    4365             :       }
    4366             : 
    4367           0 :       if (FLAG_fuzzer_gc_analysis) {
    4368             :         // Skips values >=100% since they already trigger marking.
    4369           0 :         if (current_percent < 100.0) {
    4370             :           max_marking_limit_reached_ =
    4371           0 :               std::max(max_marking_limit_reached_, current_percent);
    4372             :         }
    4373           0 :       } else if (static_cast<int>(current_percent) >=
    4374           0 :                  stress_marking_percentage_) {
    4375           0 :         stress_marking_percentage_ = NextStressMarkingLimit();
    4376           0 :         return IncrementalMarkingLimit::kHardLimit;
    4377             :       }
    4378             :     }
    4379             :   }
    4380             : 
    4381        9957 :   size_t old_generation_space_available = OldGenerationSpaceAvailable();
    4382             : 
    4383       19914 :   if (old_generation_space_available > new_space_->Capacity()) {
    4384             :     return IncrementalMarkingLimit::kNoLimit;
    4385             :   }
    4386        1378 :   if (ShouldOptimizeForMemoryUsage()) {
    4387             :     return IncrementalMarkingLimit::kHardLimit;
    4388             :   }
    4389        1362 :   if (ShouldOptimizeForLoadTime()) {
    4390             :     return IncrementalMarkingLimit::kNoLimit;
    4391             :   }
    4392        1362 :   if (old_generation_space_available == 0) {
    4393             :     return IncrementalMarkingLimit::kHardLimit;
    4394             :   }
    4395        1277 :   return IncrementalMarkingLimit::kSoftLimit;
    4396             : }
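
// Editor's note (added): condensing the checks above, kHardLimit is returned
// under the stress flags, under high memory pressure, when optimizing for
// memory usage, or when no old-generation space is left; kSoftLimit when the
// remaining old-generation space has fallen to at most the new-space
// capacity; and kNoLimit in every early-out case (marking cannot be activated
// or allocation is forced, the old generation is below the activation
// threshold, ample space remains, or a load is in progress).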
    4397             : 
    4398          10 : void Heap::EnableInlineAllocation() {
    4399        8180 :   if (!inline_allocation_disabled_) return;
    4400        8180 :   inline_allocation_disabled_ = false;
    4401             : 
    4402             :   // Update inline allocation limit for new space.
    4403        8180 :   new_space()->UpdateInlineAllocationLimit(0);
    4404             : }
    4405             : 
    4406             : 
    4407        8204 : void Heap::DisableInlineAllocation() {
    4408        8204 :   if (inline_allocation_disabled_) return;
    4409        8204 :   inline_allocation_disabled_ = true;
    4410             : 
    4411             :   // Update inline allocation limit for new space.
    4412        8204 :   new_space()->UpdateInlineAllocationLimit(0);
    4413             : 
    4414             :   // Update inline allocation limit for old spaces.
    4415             :   PagedSpaces spaces(this);
    4416       16408 :   CodeSpaceMemoryModificationScope modification_scope(this);
    4417       32816 :   for (PagedSpace* space = spaces.next(); space != nullptr;
    4418             :        space = spaces.next()) {
    4419       24612 :     space->FreeLinearAllocationArea();
    4420             :   }
    4421             : }
    4422             : 
    4423       44038 : HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
    4424             :   // Code objects which should stay at a fixed address are allocated either
    4425             :   // in the first page of code space, in large object space, or (during
    4426             :   // snapshot creation) the containing page is marked as immovable.
    4427             :   DCHECK(!heap_object.is_null());
    4428             :   DCHECK(code_space_->Contains(heap_object));
    4429             :   DCHECK_GE(object_size, 0);
    4430       44038 :   if (!Heap::IsImmovable(heap_object)) {
    4431       79516 :     if (isolate()->serializer_enabled() ||
    4432       39755 :         code_space_->first_page()->Contains(heap_object->address())) {
    4433             :       MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
    4434             :     } else {
    4435             :       // Discard the first code allocation, which was on a page where it could
    4436             :       // be moved.
    4437             :       CreateFillerObjectAt(heap_object->address(), object_size,
    4438       39755 :                            ClearRecordedSlots::kNo);
    4439       39755 :       heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
    4440             :       UnprotectAndRegisterMemoryChunk(heap_object);
    4441             :       ZapCodeObject(heap_object->address(), object_size);
    4442       39755 :       OnAllocationEvent(heap_object, object_size);
    4443             :     }
    4444             :   }
    4445       44038 :   return heap_object;
    4446             : }
    4447             : 
    4448   333029089 : HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
    4449             :                                            AllocationAlignment alignment) {
    4450             :   // Two GCs before panicking. In new space this will almost always succeed.
    4451   333029089 :   AllocationResult alloc = AllocateRaw(size, allocation, alignment);
    4452   333028607 :   if (alloc.To(&result)) {
    4453             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4454   333010829 :     return result;
    4455             :   }
    4456             :   // Two GCs before panicking. In newspace will almost always succeed.
    4457       17844 :   for (int i = 0; i < 2; i++) {
    4458             :     CollectGarbage(alloc.RetrySpace(),
    4459       17806 :                    GarbageCollectionReason::kAllocationFailure);
    4460       17806 :     alloc = AllocateRaw(size, allocation, alignment);
    4461       17806 :     if (alloc.To(&result)) {
    4462             :       DCHECK(result != ReadOnlyRoots(this).exception());
    4463       17773 :       return result;
    4464             :     }
    4465             :   }
    4466           5 :   return HeapObject();
    4467             : }
    4468             : 
    4469   331440902 : HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
    4470             :                                             AllocationAlignment alignment) {
    4471             :   AllocationResult alloc;
    4472   331440902 :   HeapObject result = AllocateRawWithLightRetry(size, allocation, alignment);
    4473   331440436 :   if (!result.is_null()) return result;
    4474             : 
    4475           5 :   isolate()->counters()->gc_last_resort_from_handles()->Increment();
    4476           5 :   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
    4477             :   {
    4478             :     AlwaysAllocateScope scope(isolate());
    4479           5 :     alloc = AllocateRaw(size, allocation, alignment);
    4480             :   }
    4481           5 :   if (alloc.To(&result)) {
    4482             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4483           5 :     return result;
    4484             :   }
    4485             :   // TODO(1181417): Fix this.
    4486           0 :   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
    4487             :   return HeapObject();
    4488             : }
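
// Editor's note (added): the two helpers above form an escalation ladder.
// AllocateRawWithLightRetry tries the allocation, runs up to two targeted
// GCs with a retry after each, and otherwise returns a null HeapObject;
// AllocateRawWithRetryOrFail then falls back to a last-resort full GC, one
// more attempt under AlwaysAllocateScope, and finally FatalProcessOutOfMemory
// if even that allocation fails.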
    4489             : 
    4490             : // TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
    4491             : // parameter and just do what's necessary.
    4492       39755 : HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
    4493       39755 :   AllocationResult alloc = code_lo_space()->AllocateRaw(size);
    4494             :   HeapObject result;
    4495       39754 :   if (alloc.To(&result)) {
    4496             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4497       39751 :     return result;
    4498             :   }
    4499             :   // Two GCs before panicking.
    4500           3 :   for (int i = 0; i < 2; i++) {
    4501             :     CollectGarbage(alloc.RetrySpace(),
    4502           3 :                    GarbageCollectionReason::kAllocationFailure);
    4503           3 :     alloc = code_lo_space()->AllocateRaw(size);
    4504           3 :     if (alloc.To(&result)) {
    4505             :       DCHECK(result != ReadOnlyRoots(this).exception());
    4506           3 :       return result;
    4507             :     }
    4508             :   }
    4509           0 :   isolate()->counters()->gc_last_resort_from_handles()->Increment();
    4510           0 :   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
    4511             :   {
    4512             :     AlwaysAllocateScope scope(isolate());
    4513           0 :     alloc = code_lo_space()->AllocateRaw(size);
    4514             :   }
    4515           0 :   if (alloc.To(&result)) {
    4516             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4517           0 :     return result;
    4518             :   }
    4519             :   // TODO(1181417): Fix this.
    4520           0 :   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
    4521             :   return HeapObject();
    4522             : }
    4523             : 
    4524       61531 : void Heap::SetUp() {
    4525             : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
    4526             :   allocation_timeout_ = NextAllocationTimeout();
    4527             : #endif
    4528             : 
    4529             :   // Initialize heap spaces and initial maps and objects.
    4530             :   //
    4531             :   // If the heap is not yet configured (e.g. through the API), configure it.
    4532             :   // Configuration is based on the flags new-space-size (really the semispace
    4533             :   // size) and old-space-size if set, or on the initial values of
    4534             :   // semispace_size_ and old_generation_size_ otherwise.
    4535       61531 :   if (!configured_) ConfigureHeapDefault();
    4536             : 
    4537             :   mmap_region_base_ =
    4538       61532 :       reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
    4539       61534 :       ~kMmapRegionMask;
    4540             : 
    4541             :   // Set up memory allocator.
    4542      123068 :   memory_allocator_.reset(
    4543      123068 :       new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
    4544             : 
    4545       61533 :   store_buffer_.reset(new StoreBuffer(this));
    4546             : 
    4547       61532 :   heap_controller_.reset(new HeapController(this));
    4548             : 
    4549       61533 :   mark_compact_collector_.reset(new MarkCompactCollector(this));
    4550             : 
    4551       61534 :   scavenger_collector_.reset(new ScavengerCollector(this));
    4552             : 
    4553      123067 :   incremental_marking_.reset(
    4554             :       new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
    4555      123068 :                              mark_compact_collector_->weak_objects()));
    4556             : 
    4557       61534 :   if (FLAG_concurrent_marking || FLAG_parallel_marking) {
    4558             :     MarkCompactCollector::MarkingWorklist* marking_worklist =
    4559             :         mark_compact_collector_->marking_worklist();
    4560      122847 :     concurrent_marking_.reset(new ConcurrentMarking(
    4561             :         this, marking_worklist->shared(), marking_worklist->on_hold(),
    4562      122847 :         mark_compact_collector_->weak_objects(), marking_worklist->embedder()));
    4563             :   } else {
    4564         220 :     concurrent_marking_.reset(
    4565         220 :         new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));
    4566             :   }
    4567             : 
    4568     1046062 :   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    4569      492264 :     space_[i] = nullptr;
    4570             :   }
    4571       61534 : }
    4572             : 
    4573       61534 : void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
    4574             :   DCHECK_NOT_NULL(ro_heap);
    4575             :   DCHECK_IMPLIES(read_only_space_ != nullptr,
    4576             :                  read_only_space_ == ro_heap->read_only_space());
    4577       61534 :   read_only_heap_ = ro_heap;
    4578       61534 :   space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
    4579       61534 : }
    4580             : 
    4581       61534 : void Heap::SetUpSpaces() {
    4582             :   // Ensure SetUpFromReadOnlyHeap has been run.
    4583             :   DCHECK_NOT_NULL(read_only_space_);
    4584             :   space_[NEW_SPACE] = new_space_ =
    4585             :       new NewSpace(this, memory_allocator_->data_page_allocator(),
    4586       61534 :                    initial_semispace_size_, max_semi_space_size_);
    4587       61534 :   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
    4588       61534 :   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
    4589       61534 :   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
    4590       61534 :   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
    4591             :   space_[NEW_LO_SPACE] = new_lo_space_ =
    4592      123068 :       new NewLargeObjectSpace(this, new_space_->Capacity());
    4593       61534 :   space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
    4594             : 
    4595     9414702 :   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
    4596             :        i++) {
    4597     4676584 :     deferred_counters_[i] = 0;
    4598             :   }
    4599             : 
    4600       61534 :   tracer_.reset(new GCTracer(this));
    4601             : #ifdef ENABLE_MINOR_MC
    4602       61534 :   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
    4603             : #else
    4604             :   minor_mark_compact_collector_ = nullptr;
    4605             : #endif  // ENABLE_MINOR_MC
    4606      123068 :   array_buffer_collector_.reset(new ArrayBufferCollector(this));
    4607       61534 :   gc_idle_time_handler_.reset(new GCIdleTimeHandler());
    4608       61534 :   memory_reducer_.reset(new MemoryReducer(this));
    4609       61534 :   if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
    4610           0 :     live_object_stats_.reset(new ObjectStats(this));
    4611           0 :     dead_object_stats_.reset(new ObjectStats(this));
    4612             :   }
    4613      123068 :   local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
    4614             : 
    4615       61534 :   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
    4616       61534 :   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
    4617             : 
    4618       61534 :   store_buffer()->SetUp();
    4619             : 
    4620       61534 :   mark_compact_collector()->SetUp();
    4621             : #ifdef ENABLE_MINOR_MC
    4622       61534 :   if (minor_mark_compact_collector() != nullptr) {
    4623       61534 :     minor_mark_compact_collector()->SetUp();
    4624             :   }
    4625             : #endif  // ENABLE_MINOR_MC
    4626             : 
    4627       61534 :   if (FLAG_idle_time_scavenge) {
    4628       61534 :     scavenge_job_.reset(new ScavengeJob());
    4629       61534 :     idle_scavenge_observer_.reset(new IdleScavengeObserver(
    4630             :         *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
    4631      123068 :     new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
    4632             :   }
    4633             : 
    4634             :   SetGetExternallyAllocatedMemoryInBytesCallback(
    4635             :       DefaultGetExternallyAllocatedMemoryInBytesCallback);
    4636             : 
    4637       61534 :   if (FLAG_stress_marking > 0) {
    4638           0 :     stress_marking_percentage_ = NextStressMarkingLimit();
    4639           0 :     stress_marking_observer_ = new StressMarkingObserver(*this);
    4640             :     AddAllocationObserversToAllSpaces(stress_marking_observer_,
    4641           0 :                                       stress_marking_observer_);
    4642             :   }
    4643       61534 :   if (FLAG_stress_scavenge > 0) {
    4644           0 :     stress_scavenge_observer_ = new StressScavengeObserver(*this);
    4645           0 :     new_space()->AddAllocationObserver(stress_scavenge_observer_);
    4646             :   }
    4647             : 
    4648       61534 :   write_protect_code_memory_ = FLAG_write_protect_code_memory;
    4649       61534 : }
    4650             : 
    4651       61474 : void Heap::InitializeHashSeed() {
    4652             :   DCHECK(!deserialization_complete_);
    4653             :   uint64_t new_hash_seed;
    4654       61474 :   if (FLAG_hash_seed == 0) {
    4655       61404 :     int64_t rnd = isolate()->random_number_generator()->NextInt64();
    4656       61404 :     new_hash_seed = static_cast<uint64_t>(rnd);
    4657             :   } else {
    4658          70 :     new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
    4659             :   }
    4660             :   ReadOnlyRoots(this).hash_seed()->copy_in(
    4661             :       0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
    4662       61474 : }
    4663             : 
    4664    16164047 : void Heap::SetStackLimits() {
    4665             :   DCHECK_NOT_NULL(isolate_);
    4666             :   DCHECK(isolate_ == isolate());
    4667             :   // On 64 bit machines, pointers are generally out of range of Smis.  We write
    4668             :   // something that looks like an out of range Smi to the GC.
    4669             : 
    4670             :   // Set up the special root array entries containing the stack limits.
    4671             :   // These are actually addresses, but the tag makes the GC ignore it.
    4672             :   roots_table()[RootIndex::kStackLimit] =
    4673    32328094 :       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
    4674             :   roots_table()[RootIndex::kRealStackLimit] =
    4675    16164047 :       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
    4676    16164047 : }
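
// Editor's note (added): a worked example of the tagging above, assuming the
// usual V8 Smi encoding (kSmiTag == 0, kSmiTagMask == 1). A jslimit of
// 0x7fff5ab3 becomes (0x7fff5ab3 & ~1) | 0 == 0x7fff5ab2: the low bit is
// cleared, so the stored value looks like a Smi and the GC ignores it, while
// the remaining bits still encode the stack limit address.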
    4677             : 
    4678         261 : void Heap::ClearStackLimits() {
    4679         261 :   roots_table()[RootIndex::kStackLimit] = kNullAddress;
    4680         261 :   roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
    4681         261 : }
    4682             : 
    4683           0 : int Heap::NextAllocationTimeout(int current_timeout) {
    4684           0 :   if (FLAG_random_gc_interval > 0) {
    4685             :     // If the current timeout hasn't reached 0, the GC was caused by something
    4686             :     // other than the --stress-atomic-gc flag and we don't update the timeout.
    4687           0 :     if (current_timeout <= 0) {
    4688           0 :       return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
    4689             :     } else {
    4690             :       return current_timeout;
    4691             :     }
    4692             :   }
    4693           0 :   return FLAG_gc_interval;
    4694             : }
    4695             : 
    4696           0 : void Heap::PrintAllocationsHash() {
    4697           0 :   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
    4698           0 :   PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
    4699           0 : }
    4700             : 
    4701           0 : void Heap::PrintMaxMarkingLimitReached() {
    4702           0 :   PrintF("\n### Maximum marking limit reached = %.02lf\n",
    4703           0 :          max_marking_limit_reached_);
    4704           0 : }
    4705             : 
    4706           0 : void Heap::PrintMaxNewSpaceSizeReached() {
    4707           0 :   PrintF("\n### Maximum new space size reached = %.02lf\n",
    4708           0 :          stress_scavenge_observer_->MaxNewSpaceSizeReached());
    4709           0 : }
    4710             : 
    4711           0 : int Heap::NextStressMarkingLimit() {
    4712           0 :   return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
    4713             : }
    4714             : 
    4715       61534 : void Heap::NotifyDeserializationComplete() {
    4716             :   PagedSpaces spaces(this);
    4717      246133 :   for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
    4718      184601 :     if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
    4719             : #ifdef DEBUG
    4720             :     // All pages right after bootstrapping must be marked as never-evacuate.
    4721             :     for (Page* p : *s) {
    4722             :       DCHECK(p->NeverEvacuate());
    4723             :     }
    4724             : #endif  // DEBUG
    4725             :   }
    4726             : 
    4727       61534 :   deserialization_complete_ = true;
    4728       61534 : }
    4729             : 
    4730       90439 : void Heap::NotifyBootstrapComplete() {
    4731             :   // This function is invoked for each native context creation. We are
    4732             :   // interested only in the first native context.
    4733       90439 :   if (old_generation_capacity_after_bootstrap_ == 0) {
    4734       58801 :     old_generation_capacity_after_bootstrap_ = OldGenerationCapacity();
    4735             :   }
    4736       90439 : }
    4737             : 
    4738      481153 : void Heap::NotifyOldGenerationExpansion() {
    4739             :   const size_t kMemoryReducerActivationThreshold = 1 * MB;
    4740     1147757 :   if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
    4741       82919 :       OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
    4742      492759 :                                      kMemoryReducerActivationThreshold &&
    4743             :       FLAG_memory_reducer_for_small_heaps) {
    4744             :     MemoryReducer::Event event;
    4745       11606 :     event.type = MemoryReducer::kPossibleGarbage;
    4746       11606 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    4747       11606 :     memory_reducer()->NotifyPossibleGarbage(event);
    4748             :   }
    4749      481153 : }
    4750             : 
    4751         150 : void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
    4752             :   DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
    4753         150 :   local_embedder_heap_tracer()->SetRemoteTracer(tracer);
    4754         150 : }
    4755             : 
    4756           0 : EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
    4757           0 :   return local_embedder_heap_tracer()->remote_tracer();
    4758             : }
    4759             : 
    4760           5 : void Heap::RegisterExternallyReferencedObject(Address* location) {
    4761             :   // The embedder is not aware of whether numbers are materialized as heap
    4762             :   // objects or just passed around as Smis.
    4763           5 :   Object object(*location);
    4764           5 :   if (!object->IsHeapObject()) return;
    4765             :   HeapObject heap_object = HeapObject::cast(object);
    4766             :   DCHECK(Contains(heap_object));
    4767          10 :   if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
    4768           0 :     incremental_marking()->WhiteToGreyAndPush(heap_object);
    4769             :   } else {
    4770             :     DCHECK(mark_compact_collector()->in_use());
    4771             :     mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
    4772             :   }
    4773             : }
    4774             : 
    4775      123034 : void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
    4776             : 
    4777       61518 : void Heap::TearDown() {
    4778             :   DCHECK_EQ(gc_state_, TEAR_DOWN);
    4779             : #ifdef VERIFY_HEAP
    4780             :   if (FLAG_verify_heap) {
    4781             :     Verify();
    4782             :   }
    4783             : #endif
    4784             : 
    4785             :   UpdateMaximumCommitted();
    4786             : 
    4787       61518 :   if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
    4788           0 :     PrintAllocationsHash();
    4789             :   }
    4790             : 
    4791       61517 :   if (FLAG_fuzzer_gc_analysis) {
    4792           0 :     if (FLAG_stress_marking > 0) {
    4793             :       PrintMaxMarkingLimitReached();
    4794             :     }
    4795           0 :     if (FLAG_stress_scavenge > 0) {
    4796           0 :       PrintMaxNewSpaceSizeReached();
    4797             :     }
    4798             :   }
    4799             : 
    4800       61517 :   if (FLAG_idle_time_scavenge) {
    4801      123036 :     new_space()->RemoveAllocationObserver(idle_scavenge_observer_.get());
    4802             :     idle_scavenge_observer_.reset();
    4803             :     scavenge_job_.reset();
    4804             :   }
    4805             : 
    4806       61517 :   if (FLAG_stress_marking > 0) {
    4807           0 :     RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
    4808           0 :                                            stress_marking_observer_);
    4809           0 :     delete stress_marking_observer_;
    4810           0 :     stress_marking_observer_ = nullptr;
    4811             :   }
    4812       61517 :   if (FLAG_stress_scavenge > 0) {
    4813           0 :     new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
    4814           0 :     delete stress_scavenge_observer_;
    4815           0 :     stress_scavenge_observer_ = nullptr;
    4816             :   }
    4817             : 
    4818             :   heap_controller_.reset();
    4819             : 
    4820       61518 :   if (mark_compact_collector_) {
    4821       61518 :     mark_compact_collector_->TearDown();
    4822             :     mark_compact_collector_.reset();
    4823             :   }
    4824             : 
    4825             : #ifdef ENABLE_MINOR_MC
    4826       61518 :   if (minor_mark_compact_collector_ != nullptr) {
    4827       61518 :     minor_mark_compact_collector_->TearDown();
    4828       61518 :     delete minor_mark_compact_collector_;
    4829       61519 :     minor_mark_compact_collector_ = nullptr;
    4830             :   }
    4831             : #endif  // ENABLE_MINOR_MC
    4832             : 
    4833             :   scavenger_collector_.reset();
    4834       61518 :   array_buffer_collector_.reset();
    4835       61519 :   incremental_marking_.reset();
    4836       61519 :   concurrent_marking_.reset();
    4837             : 
    4838             :   gc_idle_time_handler_.reset();
    4839             : 
    4840       61520 :   if (memory_reducer_ != nullptr) {
    4841       61519 :     memory_reducer_->TearDown();
    4842             :     memory_reducer_.reset();
    4843             :   }
    4844             : 
    4845             :   live_object_stats_.reset();
    4846             :   dead_object_stats_.reset();
    4847             : 
    4848       61519 :   local_embedder_heap_tracer_.reset();
    4849             : 
    4850       61517 :   external_string_table_.TearDown();
    4851             : 
     4852             :   // Tear down all ArrayBuffers before tearing down the heap since their
    4853             :   // byte_length may be a HeapNumber which is required for freeing the backing
    4854             :   // store.
    4855       61519 :   ArrayBufferTracker::TearDown(this);
    4856             : 
    4857       61518 :   tracer_.reset();
    4858             : 
    4859       61519 :   read_only_heap_->OnHeapTearDown();
    4860       61521 :   space_[RO_SPACE] = read_only_space_ = nullptr;
    4861      922773 :   for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
    4862      430629 :     delete space_[i];
    4863      430626 :     space_[i] = nullptr;
    4864             :   }
    4865             : 
    4866       61518 :   store_buffer()->TearDown();
    4867             : 
    4868       61519 :   memory_allocator()->TearDown();
    4869             : 
    4870             :   StrongRootsList* next = nullptr;
    4871       61519 :   for (StrongRootsList* list = strong_roots_list_; list; list = next) {
    4872           0 :     next = list->next;
    4873           0 :     delete list;
    4874             :   }
    4875       61519 :   strong_roots_list_ = nullptr;
    4876             : 
    4877             :   store_buffer_.reset();
    4878       61519 :   memory_allocator_.reset();
    4879       61518 : }
    4880             : 
    4881          35 : void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
    4882             :                                  GCType gc_type, void* data) {
    4883             :   DCHECK_NOT_NULL(callback);
    4884             :   DCHECK(gc_prologue_callbacks_.end() ==
    4885             :          std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
    4886             :                    GCCallbackTuple(callback, gc_type, data)));
    4887          35 :   gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
    4888          35 : }
    4889             : 
    4890          35 : void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
    4891             :                                     void* data) {
    4892             :   DCHECK_NOT_NULL(callback);
    4893          35 :   for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
    4894          70 :     if (gc_prologue_callbacks_[i].callback == callback &&
    4895          35 :         gc_prologue_callbacks_[i].data == data) {
    4896             :       gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
    4897             :       gc_prologue_callbacks_.pop_back();
    4898          35 :       return;
    4899             :     }
    4900             :   }
    4901           0 :   UNREACHABLE();
    4902             : }
    4903             : 
    4904       69916 : void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
    4905             :                                  GCType gc_type, void* data) {
    4906             :   DCHECK_NOT_NULL(callback);
    4907             :   DCHECK(gc_epilogue_callbacks_.end() ==
    4908             :          std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
    4909             :                    GCCallbackTuple(callback, gc_type, data)));
    4910       69916 :   gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
    4911       69916 : }
    4912             : 
    4913        8382 : void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
    4914             :                                     void* data) {
    4915             :   DCHECK_NOT_NULL(callback);
    4916       25146 :   for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
    4917       25146 :     if (gc_epilogue_callbacks_[i].callback == callback &&
    4918        8382 :         gc_epilogue_callbacks_[i].data == data) {
    4919             :       gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
    4920             :       gc_epilogue_callbacks_.pop_back();
    4921        8382 :       return;
    4922             :     }
    4923             :   }
    4924           0 :   UNREACHABLE();
    4925             : }
    4926             : 
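// Usage sketch: these lists back the public v8::Isolate GC callback API. Note
// that the UNREACHABLE() in the Remove* paths makes removing a callback that
// was never added a fatal error, so registration and removal must be paired
// with the same (callback, data) tuple. Default-argument details of the public
// API may differ between V8 revisions.
#include <v8.h>

static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                         v8::GCCallbackFlags flags, void* data) {
  // `data` is the pointer supplied at registration time.
}

void RegisterTemporaryGCHook(v8::Isolate* isolate, void* data) {
  isolate->AddGCPrologueCallback(OnGCPrologue, data, v8::kGCTypeAll);
  // ... embedder work that wants prologue notifications ...
  isolate->RemoveGCPrologueCallback(OnGCPrologue, data);
}
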
    4927             : namespace {
    4928         392 : Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
    4929             :                                            Handle<WeakArrayList> array,
    4930             :                                            AllocationType allocation) {
    4931         392 :   if (array->length() == 0) {
    4932           0 :     return array;
    4933             :   }
    4934         392 :   int new_length = array->CountLiveWeakReferences();
    4935         392 :   if (new_length == array->length()) {
    4936         282 :     return array;
    4937             :   }
    4938             : 
    4939             :   Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
    4940             :       heap->isolate(),
    4941             :       handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
    4942         110 :       new_length, allocation);
    4943             :   // Allocation might have caused GC and turned some of the elements into
    4944             :   // cleared weak heap objects. Count the number of live references again and
    4945             :   // fill in the new array.
    4946             :   int copy_to = 0;
    4947       19780 :   for (int i = 0; i < array->length(); i++) {
    4948             :     MaybeObject element = array->Get(i);
    4949        9835 :     if (element->IsCleared()) continue;
    4950       19020 :     new_array->Set(copy_to++, element);
    4951             :   }
    4952             :   new_array->set_length(copy_to);
    4953         110 :   return new_array;
    4954             : }
    4955             : 
    4956             : }  // anonymous namespace
    4957             : 
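// CompactWeakArrayList follows a two-pass pattern: count the live references,
// allocate a right-sized array, then copy while re-checking liveness, because
// the allocation itself may trigger a GC that clears additional entries. A
// self-contained sketch of the copy step over a plain vector, with a null
// pointer standing in for a cleared weak reference:
#include <vector>

template <typename T>
std::vector<T*> CompactClearedEntries(const std::vector<T*>& array) {
  std::vector<T*> compacted;
  compacted.reserve(array.size());
  for (T* element : array) {
    if (element == nullptr) continue;  // cleared entry: drop it
    compacted.push_back(element);      // live entry: keep it, preserving order
  }
  return compacted;
}
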
    4958         196 : void Heap::CompactWeakArrayLists(AllocationType allocation) {
    4959             :   // Find known PrototypeUsers and compact them.
    4960             :   std::vector<Handle<PrototypeInfo>> prototype_infos;
    4961             :   {
    4962         392 :     HeapIterator iterator(this);
    4963     1610911 :     for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
    4964     1610715 :       if (o->IsPrototypeInfo()) {
    4965             :         PrototypeInfo prototype_info = PrototypeInfo::cast(o);
    4966       13187 :         if (prototype_info->prototype_users()->IsWeakArrayList()) {
    4967          25 :           prototype_infos.emplace_back(handle(prototype_info, isolate()));
    4968             :         }
    4969             :       }
    4970             :     }
    4971             :   }
    4972         221 :   for (auto& prototype_info : prototype_infos) {
    4973             :     Handle<WeakArrayList> array(
    4974             :         WeakArrayList::cast(prototype_info->prototype_users()), isolate());
    4975             :     DCHECK_IMPLIES(allocation == AllocationType::kOld,
    4976             :                    InOldSpace(*array) ||
    4977             :                        *array == ReadOnlyRoots(this).empty_weak_array_list());
    4978             :     WeakArrayList new_array = PrototypeUsers::Compact(
    4979          25 :         array, this, JSObject::PrototypeRegistryCompactionCallback, allocation);
    4980          25 :     prototype_info->set_prototype_users(new_array);
    4981             :   }
    4982             : 
    4983             :   // Find known WeakArrayLists and compact them.
    4984             :   Handle<WeakArrayList> scripts(script_list(), isolate());
    4985             :   DCHECK_IMPLIES(allocation == AllocationType::kOld, InOldSpace(*scripts));
    4986         196 :   scripts = CompactWeakArrayList(this, scripts, allocation);
    4987             :   set_script_list(*scripts);
    4988             : 
    4989             :   Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
    4990             :                                        isolate());
    4991             :   DCHECK_IMPLIES(allocation == AllocationType::kOld,
    4992             :                  InOldSpace(*no_script_list));
    4993         196 :   no_script_list = CompactWeakArrayList(this, no_script_list, allocation);
    4994             :   set_noscript_shared_function_infos(*no_script_list);
    4995         196 : }
    4996             : 
    4997      135543 : void Heap::AddRetainedMap(Handle<Map> map) {
    4998      135543 :   if (map->is_in_retained_map_list()) {
    4999             :     return;
    5000             :   }
    5001             :   Handle<WeakArrayList> array(retained_maps(), isolate());
    5002       45629 :   if (array->IsFull()) {
    5003       11996 :     CompactRetainedMaps(*array);
    5004             :   }
    5005             :   array =
    5006       45629 :       WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
    5007             :   array = WeakArrayList::AddToEnd(
    5008             :       isolate(), array,
    5009       91258 :       MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
    5010       45629 :   if (*array != retained_maps()) {
    5011             :     set_retained_maps(*array);
    5012             :   }
    5013             :   map->set_is_in_retained_map_list(true);
    5014             : }
    5015             : 
    5016       11996 : void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
    5017             :   DCHECK_EQ(retained_maps, this->retained_maps());
    5018             :   int length = retained_maps->length();
    5019             :   int new_length = 0;
    5020             :   int new_number_of_disposed_maps = 0;
    5021             :   // This loop compacts the array by removing cleared weak cells.
    5022       89872 :   for (int i = 0; i < length; i += 2) {
    5023             :     MaybeObject maybe_object = retained_maps->Get(i);
    5024       38938 :     if (maybe_object->IsCleared()) {
    5025        8768 :       continue;
    5026             :     }
    5027             : 
    5028             :     DCHECK(maybe_object->IsWeak());
    5029             : 
    5030       30170 :     MaybeObject age = retained_maps->Get(i + 1);
    5031             :     DCHECK(age->IsSmi());
    5032       30170 :     if (i != new_length) {
    5033        3195 :       retained_maps->Set(new_length, maybe_object);
    5034        3195 :       retained_maps->Set(new_length + 1, age);
    5035             :     }
    5036       30170 :     if (i < number_of_disposed_maps_) {
    5037          74 :       new_number_of_disposed_maps += 2;
    5038             :     }
    5039       30170 :     new_length += 2;
    5040             :   }
    5041       11996 :   number_of_disposed_maps_ = new_number_of_disposed_maps;
    5042             :   HeapObject undefined = ReadOnlyRoots(this).undefined_value();
    5043       47068 :   for (int i = new_length; i < length; i++) {
    5044       17536 :     retained_maps->Set(i, HeapObjectReference::Strong(undefined));
    5045             :   }
    5046       11996 :   if (new_length != length) retained_maps->set_length(new_length);
    5047       11996 : }
    5048             : 
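// The retained-maps list stores (weak map, age) pairs, which is why the loops
// above advance by two and why new_length counts both slots of every surviving
// pair. A self-contained sketch of the same in-place pair compaction over a
// plain vector, with a negative key standing in for a cleared weak reference:
#include <cstddef>
#include <vector>

void CompactPairsInPlace(std::vector<int>& pairs) {
  size_t new_length = 0;
  for (size_t i = 0; i + 1 < pairs.size(); i += 2) {
    if (pairs[i] < 0) continue;             // cleared: drop the whole pair
    pairs[new_length] = pairs[i];           // the "map" slot
    pairs[new_length + 1] = pairs[i + 1];   // the "age" slot
    new_length += 2;
  }
  pairs.resize(new_length);                 // trim the tail
}
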
    5049           0 : void Heap::FatalProcessOutOfMemory(const char* location) {
    5050           0 :   v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
    5051             : }
    5052             : 
    5053             : #ifdef DEBUG
    5054             : 
    5055             : class PrintHandleVisitor : public RootVisitor {
    5056             :  public:
    5057             :   void VisitRootPointers(Root root, const char* description,
    5058             :                          FullObjectSlot start, FullObjectSlot end) override {
    5059             :     for (FullObjectSlot p = start; p < end; ++p)
    5060             :       PrintF("  handle %p to %p\n", p.ToVoidPtr(),
    5061             :              reinterpret_cast<void*>((*p).ptr()));
    5062             :   }
    5063             : };
    5064             : 
    5065             : 
    5066             : void Heap::PrintHandles() {
    5067             :   PrintF("Handles:\n");
    5068             :   PrintHandleVisitor v;
    5069             :   isolate_->handle_scope_implementer()->Iterate(&v);
    5070             : }
    5071             : 
    5072             : #endif
    5073             : 
    5074             : class CheckHandleCountVisitor : public RootVisitor {
    5075             :  public:
    5076           0 :   CheckHandleCountVisitor() : handle_count_(0) {}
    5077           0 :   ~CheckHandleCountVisitor() override {
    5078           0 :     CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
    5079           0 :   }
    5080           0 :   void VisitRootPointers(Root root, const char* description,
    5081             :                          FullObjectSlot start, FullObjectSlot end) override {
    5082           0 :     handle_count_ += end - start;
    5083           0 :   }
    5084             : 
    5085             :  private:
    5086             :   ptrdiff_t handle_count_;
    5087             : };
    5088             : 
    5089             : 
    5090           0 : void Heap::CheckHandleCount() {
    5091             :   CheckHandleCountVisitor v;
    5092           0 :   isolate_->handle_scope_implementer()->Iterate(&v);
    5093           0 : }
    5094             : 
    5095       61646 : Address* Heap::store_buffer_top_address() {
    5096       61646 :   return store_buffer()->top_address();
    5097             : }
    5098             : 
    5099             : // static
    5100         112 : intptr_t Heap::store_buffer_mask_constant() {
    5101         112 :   return StoreBuffer::kStoreBufferMask;
    5102             : }
    5103             : 
    5104             : // static
    5105       61650 : Address Heap::store_buffer_overflow_function_address() {
    5106       61650 :   return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
    5107             : }
    5108             : 
    5109        3955 : void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
    5110             :   DCHECK(!IsLargeObject(object));
    5111             :   Page* page = Page::FromAddress(slot.address());
    5112      366491 :   if (!page->InYoungGeneration()) {
    5113             :     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5114             :     store_buffer()->DeleteEntry(slot.address());
    5115             :   }
    5116        3955 : }
    5117             : 
    5118             : #ifdef DEBUG
    5119             : void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
    5120             :   DCHECK(!IsLargeObject(object));
    5121             :   if (InYoungGeneration(object)) return;
    5122             :   Page* page = Page::FromAddress(slot.address());
    5123             :   DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5124             :   store_buffer()->MoveAllEntriesToRememberedSet();
    5125             :   CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
    5126             :   // Old to old slots are filtered with invalidated slots.
    5127             :   CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
    5128             :                 page->RegisteredObjectWithInvalidatedSlots(object));
    5129             : }
    5130             : #endif
    5131             : 
    5132      281556 : void Heap::ClearRecordedSlotRange(Address start, Address end) {
    5133             :   Page* page = Page::FromAddress(start);
    5134             :   DCHECK(!page->IsLargePage());
    5135     2217382 :   if (!page->InYoungGeneration()) {
    5136             :     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5137             :     store_buffer()->DeleteEntry(start, end);
    5138             :   }
    5139      281556 : }
    5140             : 
    5141    26683104 : PagedSpace* PagedSpaces::next() {
    5142    26683104 :   switch (counter_++) {
    5143             :     case RO_SPACE:
    5144             :       // skip NEW_SPACE
    5145     5069830 :       counter_++;
    5146     5069830 :       return heap_->read_only_space();
    5147             :     case OLD_SPACE:
    5148     5403337 :       return heap_->old_space();
    5149             :     case CODE_SPACE:
    5150     5403342 :       return heap_->code_space();
    5151             :     case MAP_SPACE:
    5152     5403337 :       return heap_->map_space();
    5153             :     default:
    5154             :       return nullptr;
    5155             :   }
    5156             : }
    5157             : 
    5158      218745 : SpaceIterator::SpaceIterator(Heap* heap)
    5159      226565 :     : heap_(heap), current_space_(FIRST_SPACE - 1) {}
    5160             : 
    5161             : SpaceIterator::~SpaceIterator() = default;
    5162             : 
    5163     1968705 : bool SpaceIterator::has_next() {
    5164             :   // Iterate until no more spaces.
    5165     2031265 :   return current_space_ != LAST_SPACE;
    5166             : }
    5167             : 
    5168     1749960 : Space* SpaceIterator::next() {
    5169             :   DCHECK(has_next());
    5170     8849816 :   return heap_->space(++current_space_);
    5171             : }
    5172             : 
    5173             : 
    5174        1290 : class HeapObjectsFilter {
    5175             :  public:
    5176        1290 :   virtual ~HeapObjectsFilter() = default;
    5177             :   virtual bool SkipObject(HeapObject object) = 0;
    5178             : };
    5179             : 
    5180             : 
    5181             : class UnreachableObjectsFilter : public HeapObjectsFilter {
    5182             :  public:
    5183        1290 :   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
    5184        1290 :     MarkReachableObjects();
    5185             :   }
    5186             : 
    5187        3870 :   ~UnreachableObjectsFilter() override {
    5188       11287 :     for (auto it : reachable_) {
    5189       19994 :       delete it.second;
    5190             :       it.second = nullptr;
    5191             :     }
    5192        2580 :   }
    5193             : 
    5194    11035571 :   bool SkipObject(HeapObject object) override {
    5195    11035571 :     if (object->IsFiller()) return true;
    5196    11035571 :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5197    11035571 :     if (reachable_.count(chunk) == 0) return true;
    5198    22070992 :     return reachable_[chunk]->count(object) == 0;
    5199             :   }
    5200             : 
    5201             :  private:
    5202    55709271 :   bool MarkAsReachable(HeapObject object) {
    5203    55709271 :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5204    55709271 :     if (reachable_.count(chunk) == 0) {
    5205       19994 :       reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
    5206             :     }
    5207   111418542 :     if (reachable_[chunk]->count(object)) return false;
    5208    10447180 :     reachable_[chunk]->insert(object);
    5209    10447180 :     return true;
    5210             :   }
    5211             : 
    5212        2580 :   class MarkingVisitor : public ObjectVisitor, public RootVisitor {
    5213             :    public:
    5214             :     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
    5215        1290 :         : filter_(filter) {}
    5216             : 
    5217    22455556 :     void VisitPointers(HeapObject host, ObjectSlot start,
    5218             :                        ObjectSlot end) override {
    5219    22455556 :       MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
    5220    22455556 :     }
    5221             : 
    5222     1202984 :     void VisitPointers(HeapObject host, MaybeObjectSlot start,
    5223             :                        MaybeObjectSlot end) final {
    5224     1202984 :       MarkPointers(start, end);
    5225     1202984 :     }
    5226             : 
    5227        9030 :     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    5228        9030 :       Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    5229             :       MarkHeapObject(target);
    5230        9030 :     }
    5231       40278 :     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    5232             :       MarkHeapObject(rinfo->target_object());
    5233       40278 :     }
    5234             : 
    5235     3984033 :     void VisitRootPointers(Root root, const char* description,
    5236             :                            FullObjectSlot start, FullObjectSlot end) override {
    5237             :       MarkPointersImpl(start, end);
    5238     3984033 :     }
    5239             : 
    5240        1290 :     void TransitiveClosure() {
    5241    20895650 :       while (!marking_stack_.empty()) {
    5242    10447180 :         HeapObject obj = marking_stack_.back();
    5243             :         marking_stack_.pop_back();
    5244    10447180 :         obj->Iterate(this);
    5245             :       }
    5246        1290 :     }
    5247             : 
    5248             :    private:
    5249    23658540 :     void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
    5250             :       MarkPointersImpl(start, end);
    5251    23658540 :     }
    5252             : 
    5253             :     template <typename TSlot>
    5254             :     V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
    5255             :       // Treat weak references as strong.
    5256    91914969 :       for (TSlot p = start; p < end; ++p) {
    5257             :         typename TSlot::TObject object = *p;
    5258             :         HeapObject heap_object;
    5259    64272396 :         if (object.GetHeapObject(&heap_object)) {
    5260             :           MarkHeapObject(heap_object);
    5261             :         }
    5262             :       }
    5263             :     }
    5264             : 
    5265             :     V8_INLINE void MarkHeapObject(HeapObject heap_object) {
    5266    55709271 :       if (filter_->MarkAsReachable(heap_object)) {
    5267    10447180 :         marking_stack_.push_back(heap_object);
    5268             :       }
    5269             :     }
    5270             : 
    5271             :     UnreachableObjectsFilter* filter_;
    5272             :     std::vector<HeapObject> marking_stack_;
    5273             :   };
    5274             : 
    5275             :   friend class MarkingVisitor;
    5276             : 
    5277        1290 :   void MarkReachableObjects() {
    5278             :     MarkingVisitor visitor(this);
    5279        1290 :     heap_->IterateRoots(&visitor, VISIT_ALL);
    5280        1290 :     visitor.TransitiveClosure();
    5281        1290 :   }
    5282             : 
    5283             :   Heap* heap_;
    5284             :   DisallowHeapAllocation no_allocation_;
    5285             :   std::unordered_map<MemoryChunk*,
    5286             :                      std::unordered_set<HeapObject, Object::Hasher>*>
    5287             :       reachable_;
    5288             : };
    5289             : 
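// UnreachableObjectsFilter computes reachability with a plain worklist: the
// roots are pushed, then TransitiveClosure pops objects and marks everything
// they point to, pushing each object only the first time it is seen. A
// self-contained sketch of that marking loop over an adjacency-list graph:
#include <unordered_set>
#include <vector>

std::unordered_set<int> Reachable(const std::vector<std::vector<int>>& edges,
                                  const std::vector<int>& roots) {
  std::unordered_set<int> reachable;
  std::vector<int> worklist;
  for (int root : roots) {
    if (reachable.insert(root).second) worklist.push_back(root);
  }
  while (!worklist.empty()) {
    int node = worklist.back();
    worklist.pop_back();
    for (int target : edges[node]) {
      // Push only on the first visit, mirroring MarkAsReachable().
      if (reachable.insert(target).second) worklist.push_back(target);
    }
  }
  return reachable;
}
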
    5290        7820 : HeapIterator::HeapIterator(Heap* heap,
    5291             :                            HeapIterator::HeapObjectsFiltering filtering)
    5292             :     : heap_(heap),
    5293             :       filtering_(filtering),
    5294             :       filter_(nullptr),
    5295             :       space_iterator_(nullptr),
    5296        7820 :       object_iterator_(nullptr) {
    5297             :   heap_->MakeHeapIterable();
    5298        7820 :   heap_->heap_iterator_start();
    5299             :   // Start the iteration.
    5300       15640 :   space_iterator_ = new SpaceIterator(heap_);
    5301        7820 :   switch (filtering_) {
    5302             :     case kFilterUnreachable:
    5303        2580 :       filter_ = new UnreachableObjectsFilter(heap_);
    5304        1290 :       break;
    5305             :     default:
    5306             :       break;
    5307             :   }
    5308       23460 :   object_iterator_ = space_iterator_->next()->GetObjectIterator();
    5309        7820 : }
    5310             : 
    5311             : 
    5312       15640 : HeapIterator::~HeapIterator() {
    5313        7820 :   heap_->heap_iterator_end();
    5314             : #ifdef DEBUG
    5315             :   // Assert that in filtering mode we have iterated through all
     5316             :   // objects. Otherwise, the heap will be left in an inconsistent state.
    5317             :   if (filtering_ != kNoFiltering) {
    5318             :     DCHECK_NULL(object_iterator_);
    5319             :   }
    5320             : #endif
    5321        7820 :   delete space_iterator_;
    5322        7820 :   delete filter_;
    5323        7820 : }
    5324             : 
    5325    88956900 : HeapObject HeapIterator::next() {
    5326    88956900 :   if (filter_ == nullptr) return NextObject();
    5327             : 
    5328    10448470 :   HeapObject obj = NextObject();
    5329    11036861 :   while (!obj.is_null() && (filter_->SkipObject(obj))) obj = NextObject();
    5330    10448470 :   return obj;
    5331             : }
    5332             : 
    5333    89545157 : HeapObject HeapIterator::NextObject() {
    5334             :   // No iterator means we are done.
    5335    89545157 :   if (object_iterator_.get() == nullptr) return HeapObject();
    5336             : 
    5337    89545157 :   HeapObject obj = object_iterator_.get()->Next();
    5338    89545238 :   if (!obj.is_null()) {
    5339             :     // If the current iterator has more objects we are fine.
    5340    89507205 :     return obj;
    5341             :   } else {
    5342             :     // Go though the spaces looking for one that has objects.
     5343             :     // Go through the spaces looking for one that has objects.
    5344      109480 :       object_iterator_ = space_iterator_->next()->GetObjectIterator();
    5345       54740 :       obj = object_iterator_.get()->Next();
    5346       54740 :       if (!obj.is_null()) {
    5347       30213 :         return obj;
    5348             :       }
    5349             :     }
    5350             :   }
    5351             :   // Done with the last space.
    5352             :   object_iterator_.reset(nullptr);
    5353        7820 :   return HeapObject();
    5354             : }
    5355             : 
    5356       94908 : void Heap::UpdateTotalGCTime(double duration) {
    5357       94908 :   if (FLAG_trace_gc_verbose) {
    5358           0 :     total_gc_time_ms_ += duration;
    5359             :   }
    5360       94908 : }
    5361             : 
    5362       73955 : void Heap::ExternalStringTable::CleanUpYoung() {
    5363             :   int last = 0;
    5364       73955 :   Isolate* isolate = heap_->isolate();
    5365       74451 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    5366         248 :     Object o = young_strings_[i];
    5367         248 :     if (o->IsTheHole(isolate)) {
    5368         228 :       continue;
    5369             :     }
    5370             :     // The real external string is already in one of these vectors and was or
    5371             :     // will be processed. Re-processing it will add a duplicate to the vector.
    5372          21 :     if (o->IsThinString()) continue;
    5373             :     DCHECK(o->IsExternalString());
    5374          20 :     if (InYoungGeneration(o)) {
    5375          40 :       young_strings_[last++] = o;
    5376             :     } else {
    5377           0 :       old_strings_.push_back(o);
    5378             :     }
    5379             :   }
    5380       73955 :   young_strings_.resize(last);
    5381       73955 : }
    5382             : 
    5383       73955 : void Heap::ExternalStringTable::CleanUpAll() {
    5384       73955 :   CleanUpYoung();
    5385             :   int last = 0;
    5386       73955 :   Isolate* isolate = heap_->isolate();
    5387      279429 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    5388      102737 :     Object o = old_strings_[i];
    5389      102737 :     if (o->IsTheHole(isolate)) {
    5390             :       continue;
    5391             :     }
    5392             :     // The real external string is already in one of these vectors and was or
    5393             :     // will be processed. Re-processing it will add a duplicate to the vector.
    5394      101358 :     if (o->IsThinString()) continue;
    5395             :     DCHECK(o->IsExternalString());
    5396             :     DCHECK(!InYoungGeneration(o));
    5397      202716 :     old_strings_[last++] = o;
    5398             :   }
    5399       73955 :   old_strings_.resize(last);
    5400             : #ifdef VERIFY_HEAP
    5401             :   if (FLAG_verify_heap) {
    5402             :     Verify();
    5403             :   }
    5404             : #endif
    5405       73955 : }
    5406             : 
    5407       61518 : void Heap::ExternalStringTable::TearDown() {
    5408       61776 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    5409         129 :     Object o = young_strings_[i];
     5410             :     // Don't finalize thin strings.
    5411         129 :     if (o->IsThinString()) continue;
    5412         121 :     heap_->FinalizeExternalString(ExternalString::cast(o));
    5413             :   }
    5414             :   young_strings_.clear();
    5415      231856 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    5416       85168 :     Object o = old_strings_[i];
     5417             :     // Don't finalize thin strings.
    5418       85168 :     if (o->IsThinString()) continue;
    5419       85168 :     heap_->FinalizeExternalString(ExternalString::cast(o));
    5420             :   }
    5421             :   old_strings_.clear();
    5422       61519 : }
    5423             : 
    5424             : 
    5425      927782 : void Heap::RememberUnmappedPage(Address page, bool compacted) {
    5426             :   // Tag the page pointer to make it findable in the dump file.
    5427      927782 :   if (compacted) {
    5428        6986 :     page ^= 0xC1EAD & (Page::kPageSize - 1);  // Cleared.
    5429             :   } else {
    5430      920796 :     page ^= 0x1D1ED & (Page::kPageSize - 1);  // I died.
    5431             :   }
    5432      989316 :   remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
    5433      989316 :   remembered_unmapped_pages_index_++;
    5434      989316 :   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
    5435      927782 : }
    5436             : 
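// RememberUnmappedPage XORs a marker ("0xC1EAD" ~ "cleared", "0x1D1ED" ~
// "I died") into the in-page bits of the page address, so a crash dump still
// reveals which recently unmapped page a stale pointer referred to while the
// page-aligned high bits stay recognizable. A worked example, assuming a
// 512 KB page size purely for illustration:
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kIllustrativePageSize = 512 * 1024;
  const uint64_t page = 0x7f1234580000;  // page-aligned address
  const uint64_t tagged = page ^ (0x1D1ED & (kIllustrativePageSize - 1));
  // The aligned part survives: (tagged & ~(kIllustrativePageSize - 1)) == page.
  std::printf("page   = %#llx\ntagged = %#llx\n",
              static_cast<unsigned long long>(page),
              static_cast<unsigned long long>(tagged));
  return 0;
}
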
    5437     3421138 : void Heap::RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end) {
    5438     3421138 :   StrongRootsList* list = new StrongRootsList();
    5439     3421149 :   list->next = strong_roots_list_;
    5440     3421149 :   list->start = start;
    5441     3421149 :   list->end = end;
    5442     3421149 :   strong_roots_list_ = list;
    5443     3421149 : }
    5444             : 
    5445     3421129 : void Heap::UnregisterStrongRoots(FullObjectSlot start) {
    5446             :   StrongRootsList* prev = nullptr;
    5447     3421129 :   StrongRootsList* list = strong_roots_list_;
    5448    10303948 :   while (list != nullptr) {
    5449     6882812 :     StrongRootsList* next = list->next;
    5450     6882812 :     if (list->start == start) {
    5451     3421132 :       if (prev) {
    5452         773 :         prev->next = next;
    5453             :       } else {
    5454     3420359 :         strong_roots_list_ = next;
    5455             :       }
    5456     3421132 :       delete list;
    5457             :     } else {
    5458             :       prev = list;
    5459             :     }
    5460             :     list = next;
    5461             :   }
    5462     3421136 : }
    5463             : 
    5464          56 : void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
    5465             :   set_builtins_constants_table(cache);
    5466          56 : }
    5467             : 
    5468          56 : void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
    5469             :   DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code->builtin_index());
    5470             :   set_interpreter_entry_trampoline_for_profiling(code);
    5471          56 : }
    5472             : 
    5473         208 : void Heap::AddDirtyJSFinalizationGroup(
    5474             :     JSFinalizationGroup finalization_group,
    5475             :     std::function<void(HeapObject object, ObjectSlot slot, Object target)>
    5476             :         gc_notify_updated_slot) {
    5477             :   DCHECK(dirty_js_finalization_groups()->IsUndefined(isolate()) ||
    5478             :          dirty_js_finalization_groups()->IsJSFinalizationGroup());
    5479             :   DCHECK(finalization_group->next()->IsUndefined(isolate()));
    5480             :   DCHECK(!finalization_group->scheduled_for_cleanup());
    5481         208 :   finalization_group->set_scheduled_for_cleanup(true);
    5482         208 :   finalization_group->set_next(dirty_js_finalization_groups());
    5483             :   gc_notify_updated_slot(
    5484             :       finalization_group,
    5485             :       finalization_group.RawField(JSFinalizationGroup::kNextOffset),
    5486             :       dirty_js_finalization_groups());
    5487             :   set_dirty_js_finalization_groups(finalization_group);
    5488             :   // Roots are rescanned after objects are moved, so no need to record a slot
    5489             :   // for the root pointing to the first JSFinalizationGroup.
    5490         208 : }
    5491             : 
    5492         172 : void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
    5493             :   DCHECK(FLAG_harmony_weak_refs);
    5494             :   DCHECK(weak_refs_keep_during_job()->IsUndefined() ||
    5495             :          weak_refs_keep_during_job()->IsOrderedHashSet());
    5496             :   Handle<OrderedHashSet> table;
    5497         172 :   if (weak_refs_keep_during_job()->IsUndefined(isolate())) {
    5498          82 :     table = isolate()->factory()->NewOrderedHashSet();
    5499             :   } else {
    5500             :     table =
    5501             :         handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
    5502             :   }
    5503         172 :   table = OrderedHashSet::Add(isolate(), table, target);
    5504             :   set_weak_refs_keep_during_job(*table);
    5505         172 : }
    5506             : 
    5507      668007 : void Heap::ClearKeepDuringJobSet() {
    5508             :   set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
    5509      668007 : }
    5510             : 
    5511           0 : size_t Heap::NumberOfTrackedHeapObjectTypes() {
    5512           0 :   return ObjectStats::OBJECT_STATS_COUNT;
    5513             : }
    5514             : 
    5515             : 
    5516           0 : size_t Heap::ObjectCountAtLastGC(size_t index) {
    5517           0 :   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    5518             :     return 0;
    5519           0 :   return live_object_stats_->object_count_last_gc(index);
    5520             : }
    5521             : 
    5522             : 
    5523           0 : size_t Heap::ObjectSizeAtLastGC(size_t index) {
    5524           0 :   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    5525             :     return 0;
    5526           0 :   return live_object_stats_->object_size_last_gc(index);
    5527             : }
    5528             : 
    5529             : 
    5530           0 : bool Heap::GetObjectTypeName(size_t index, const char** object_type,
    5531             :                              const char** object_sub_type) {
    5532           0 :   if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
    5533             : 
    5534           0 :   switch (static_cast<int>(index)) {
    5535             : #define COMPARE_AND_RETURN_NAME(name) \
    5536             :   case name:                          \
    5537             :     *object_type = #name;             \
    5538             :     *object_sub_type = "";            \
    5539             :     return true;
    5540           0 :     INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
    5541             : #undef COMPARE_AND_RETURN_NAME
    5542             : 
    5543             : #define COMPARE_AND_RETURN_NAME(name)                       \
    5544             :   case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
    5545             :     *object_type = #name;                                   \
    5546             :     *object_sub_type = "";                                  \
    5547             :     return true;
    5548           0 :     VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
    5549             : #undef COMPARE_AND_RETURN_NAME
    5550             :   }
    5551             :   return false;
    5552             : }
    5553             : 
    5554         246 : size_t Heap::NumberOfNativeContexts() {
    5555             :   int result = 0;
    5556             :   Object context = native_contexts_list();
    5557        2422 :   while (!context->IsUndefined(isolate())) {
    5558        1088 :     ++result;
    5559        1088 :     Context native_context = Context::cast(context);
    5560        1088 :     context = native_context->next_context_link();
    5561             :   }
    5562         246 :   return result;
    5563             : }
    5564             : 
    5565         246 : size_t Heap::NumberOfDetachedContexts() {
    5566             :   // The detached_contexts() array has two entries per detached context.
    5567         246 :   return detached_contexts()->length() / 2;
    5568             : }
    5569             : 
    5570         155 : const char* AllocationSpaceName(AllocationSpace space) {
    5571         155 :   switch (space) {
    5572             :     case NEW_SPACE:
    5573             :       return "NEW_SPACE";
    5574             :     case OLD_SPACE:
    5575           1 :       return "OLD_SPACE";
    5576             :     case CODE_SPACE:
    5577           0 :       return "CODE_SPACE";
    5578             :     case MAP_SPACE:
    5579           2 :       return "MAP_SPACE";
    5580             :     case LO_SPACE:
    5581           0 :       return "LO_SPACE";
    5582             :     case NEW_LO_SPACE:
    5583           0 :       return "NEW_LO_SPACE";
    5584             :     case RO_SPACE:
    5585         152 :       return "RO_SPACE";
    5586             :     default:
    5587           0 :       UNREACHABLE();
    5588             :   }
    5589             :   return nullptr;
    5590             : }
    5591             : 
    5592           0 : void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
    5593             :                                           ObjectSlot end) {
    5594           0 :   VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    5595           0 : }
    5596             : 
    5597           0 : void VerifyPointersVisitor::VisitPointers(HeapObject host,
    5598             :                                           MaybeObjectSlot start,
    5599             :                                           MaybeObjectSlot end) {
    5600           0 :   VerifyPointers(host, start, end);
    5601           0 : }
    5602             : 
    5603           0 : void VerifyPointersVisitor::VisitRootPointers(Root root,
    5604             :                                               const char* description,
    5605             :                                               FullObjectSlot start,
    5606             :                                               FullObjectSlot end) {
    5607             :   VerifyPointersImpl(start, end);
    5608           0 : }
    5609             : 
    5610             : void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
    5611           0 :   CHECK(heap_->Contains(heap_object));
    5612           0 :   CHECK(heap_object->map()->IsMap());
    5613             : }
    5614             : 
    5615             : template <typename TSlot>
    5616             : void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
    5617           0 :   for (TSlot slot = start; slot < end; ++slot) {
    5618             :     typename TSlot::TObject object = *slot;
    5619             :     HeapObject heap_object;
    5620           0 :     if (object.GetHeapObject(&heap_object)) {
    5621             :       VerifyHeapObjectImpl(heap_object);
    5622             :     } else {
    5623           0 :       CHECK(object->IsSmi() || object->IsCleared());
    5624             :     }
    5625             :   }
    5626             : }
    5627             : 
    5628           0 : void VerifyPointersVisitor::VerifyPointers(HeapObject host,
    5629             :                                            MaybeObjectSlot start,
    5630             :                                            MaybeObjectSlot end) {
    5631             :   // If this DCHECK fires then you probably added a pointer field
     5632             :   // to one of the objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
    5633             :   // this by moving that object to POINTER_VISITOR_ID_LIST.
    5634             :   DCHECK_EQ(ObjectFields::kMaybePointers,
    5635             :             Map::ObjectFieldsFrom(host->map()->visitor_id()));
    5636             :   VerifyPointersImpl(start, end);
    5637           0 : }
    5638             : 
    5639           0 : void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
    5640           0 :   Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    5641             :   VerifyHeapObjectImpl(target);
    5642           0 : }
    5643             : 
    5644           0 : void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
    5645             :   VerifyHeapObjectImpl(rinfo->target_object());
    5646           0 : }
    5647             : 
    5648           0 : void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
    5649             :                                           FullObjectSlot start,
    5650             :                                           FullObjectSlot end) {
    5651           0 :   for (FullObjectSlot current = start; current < end; ++current) {
    5652           0 :     CHECK((*current)->IsSmi());
    5653             :   }
    5654           0 : }
    5655             : 
    5656           0 : bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
    5657             :   // Object migration is governed by the following rules:
    5658             :   //
    5659             :   // 1) Objects in new-space can be migrated to the old space
     5660             :   //    that matches their target space, or they stay in new-space.
    5661             :   // 2) Objects in old-space stay in the same space when migrating.
    5662             :   // 3) Fillers (two or more words) can migrate due to left-trimming of
    5663             :   //    fixed arrays in new-space or old space.
     5664             :   // 4) Fillers (one word) can never migrate; they are skipped by
     5665             :   //    incremental marking explicitly to prevent an invalid pattern.
    5666             :   //
    5667             :   // Since this function is used for debugging only, we do not place
    5668             :   // asserts here, but check everything explicitly.
    5669           0 :   if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
    5670             :   InstanceType type = obj->map()->instance_type();
    5671             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    5672             :   AllocationSpace src = chunk->owner()->identity();
    5673           0 :   switch (src) {
    5674             :     case NEW_SPACE:
    5675           0 :       return dst == NEW_SPACE || dst == OLD_SPACE;
    5676             :     case OLD_SPACE:
    5677           0 :       return dst == OLD_SPACE;
    5678             :     case CODE_SPACE:
    5679           0 :       return dst == CODE_SPACE && type == CODE_TYPE;
    5680             :     case MAP_SPACE:
    5681             :     case LO_SPACE:
    5682             :     case CODE_LO_SPACE:
    5683             :     case NEW_LO_SPACE:
    5684             :     case RO_SPACE:
    5685             :       return false;
    5686             :   }
    5687           0 :   UNREACHABLE();
    5688             : }
    5689             : 
    5690           0 : void Heap::CreateObjectStats() {
    5691           0 :   if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
    5692           0 :   if (!live_object_stats_) {
    5693           0 :     live_object_stats_.reset(new ObjectStats(this));
    5694             :   }
    5695           0 :   if (!dead_object_stats_) {
    5696           0 :     dead_object_stats_.reset(new ObjectStats(this));
    5697             :   }
    5698             : }
    5699             : 
    5700    22516153 : void AllocationObserver::AllocationStep(int bytes_allocated,
    5701             :                                         Address soon_object, size_t size) {
    5702             :   DCHECK_GE(bytes_allocated, 0);
    5703    22516153 :   bytes_to_next_step_ -= bytes_allocated;
    5704    22516153 :   if (bytes_to_next_step_ <= 0) {
    5705      140698 :     Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
    5706      140698 :     step_size_ = GetNextStepSize();
    5707      140698 :     bytes_to_next_step_ = step_size_;
    5708             :   }
    5709             :   DCHECK_GE(bytes_to_next_step_, 0);
    5710    22516153 : }
    5711             : 
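// AllocationStep implements a simple byte budget: each observer counts down
// bytes_to_next_step_ and fires Step() once the budget is exhausted, then asks
// GetNextStepSize() for a fresh budget. A self-contained sketch of the same
// countdown with a fixed budget (the names and the fixed step size are for
// illustration only; V8 recomputes the step size per observer):
#include <cstddef>
#include <cstdio>

class CountingObserver {
 public:
  explicit CountingObserver(ptrdiff_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}

  void AllocationStep(int bytes_allocated) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_));
      bytes_to_next_step_ = step_size_;  // fixed budget in this sketch
    }
  }

 private:
  void Step(int bytes_since_last_step) {
    std::printf("observer fired after %d bytes\n", bytes_since_last_step);
  }

  ptrdiff_t step_size_;
  ptrdiff_t bytes_to_next_step_;
};
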
    5712             : namespace {
    5713             : 
    5714     2296294 : Map GcSafeMapOfCodeSpaceObject(HeapObject object) {
    5715             :   MapWord map_word = object->map_word();
    5716             :   return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
    5717     4592588 :                                         : map_word.ToMap();
    5718             : }
    5719             : 
    5720             : int GcSafeSizeOfCodeSpaceObject(HeapObject object) {
    5721     2296294 :   return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
    5722             : }
    5723             : 
    5724             : Code GcSafeCastToCode(Heap* heap, HeapObject object, Address inner_pointer) {
    5725             :   Code code = Code::unchecked_cast(object);
    5726             :   DCHECK(!code.is_null());
    5727             :   DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
    5728             :   return code;
    5729             : }
    5730             : 
    5731             : }  // namespace
    5732             : 
    5733           0 : bool Heap::GcSafeCodeContains(Code code, Address addr) {
    5734           0 :   Map map = GcSafeMapOfCodeSpaceObject(code);
    5735             :   DCHECK(map == ReadOnlyRoots(this).code_map());
    5736           0 :   if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
    5737             :   Address start = code->address();
    5738           0 :   Address end = code->address() + code->SizeFromMap(map);
    5739           0 :   return start <= addr && addr < end;
    5740             : }
    5741             : 
    5742     1093708 : Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
    5743     1093708 :   Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
    5744     1093709 :   if (!code.is_null()) return code;
    5745             : 
    5746             :   // Check if the inner pointer points into a large object chunk.
    5747      537537 :   LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
    5748      537538 :   if (large_page != nullptr) {
    5749             :     return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
    5750             :   }
    5751             : 
    5752             :   DCHECK(code_space()->Contains(inner_pointer));
    5753             : 
    5754             :   // Iterate through the page until we reach the end or find an object starting
    5755             :   // after the inner pointer.
    5756             :   Page* page = Page::FromAddress(inner_pointer);
    5757             :   DCHECK_EQ(page->owner(), code_space());
    5758      532094 :   mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);
    5759             : 
    5760             :   Address addr = page->skip_list()->StartFor(inner_pointer);
    5761             :   Address top = code_space()->top();
    5762             :   Address limit = code_space()->limit();
    5763             : 
    5764             :   while (true) {
    5765     2309274 :     if (addr == top && addr != limit) {
    5766             :       addr = limit;
    5767             :       continue;
    5768             :     }
    5769             : 
    5770             :     HeapObject obj = HeapObject::FromAddress(addr);
    5771             :     int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
    5772     2296296 :     Address next_addr = addr + obj_size;
    5773     2296296 :     if (next_addr > inner_pointer) {
    5774             :       return GcSafeCastToCode(this, obj, inner_pointer);
    5775             :     }
    5776             :     addr = next_addr;
    5777             :   }
    5778             : }
    5779             : 
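// GcSafeFindCodeForInnerPointer falls back to a linear walk: start at a known
// object boundary (from the skip list) and advance object by object until one
// spans the inner pointer. A self-contained sketch over a sorted list of
// (start, size) records, assuming the pointer lies inside one of them:
#include <cstddef>
#include <cstdint>
#include <vector>

struct Region {
  uintptr_t start;
  size_t size;
};

const Region* FindEnclosing(const std::vector<Region>& regions,
                            uintptr_t inner) {
  for (const Region& region : regions) {
    uintptr_t next = region.start + region.size;
    if (next > inner) return &region;  // first region ending past the pointer
  }
  return nullptr;  // not found (violates the stated assumption)
}
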
    5780          34 : void Heap::WriteBarrierForCodeSlow(Code code) {
    5781          39 :   for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
    5782           5 :        !it.done(); it.next()) {
    5783           5 :     GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    5784           5 :     MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    5785             :   }
    5786          34 : }
    5787             : 
    5788           0 : void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
    5789             :                                    HeapObject value) {
    5790             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5791             :   heap->store_buffer()->InsertEntry(slot);
    5792           0 : }
    5793             : 
    5794         976 : void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
    5795             :                                               int offset, int length) {
    5796     1387794 :   for (int i = 0; i < length; i++) {
    5797     1386818 :     if (!InYoungGeneration(array->get(offset + i))) continue;
    5798             :     heap->store_buffer()->InsertEntry(
    5799             :         array->RawFieldOfElementAt(offset + i).address());
    5800             :   }
    5801         976 : }
    5802             : 
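// The generational barrier records old-to-new pointers so a young-generation
// GC can treat them as roots without scanning the whole old generation. A
// self-contained sketch of that bookkeeping, with the store buffer modeled as
// a set of slot addresses (all names here are illustrative):
#include <cstdint>
#include <unordered_set>

struct MiniStoreBuffer {
  std::unordered_set<uintptr_t> old_to_new_slots;

  void RecordWrite(uintptr_t slot, bool slot_is_old, bool value_is_young) {
    // Only old-generation slots holding young-generation values need a record.
    if (slot_is_old && value_is_young) old_to_new_slots.insert(slot);
  }
};
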
    5803      220821 : void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
    5804             :                                           HeapObject object) {
    5805             :   DCHECK(InYoungGeneration(object));
    5806             :   Page* source_page = Page::FromHeapObject(host);
    5807             :   RelocInfo::Mode rmode = rinfo->rmode();
    5808             :   Address addr = rinfo->pc();
    5809             :   SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
    5810      220821 :   if (rinfo->IsInConstantPool()) {
    5811             :     addr = rinfo->constant_pool_entry_address();
    5812             :     if (RelocInfo::IsCodeTargetMode(rmode)) {
    5813             :       slot_type = CODE_ENTRY_SLOT;
    5814             :     } else {
    5815             :       DCHECK(RelocInfo::IsEmbeddedObject(rmode));
    5816             :       slot_type = OBJECT_SLOT;
    5817             :     }
    5818             :   }
    5819      441642 :   uintptr_t offset = addr - source_page->address();
    5820             :   DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
    5821      220821 :   RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
    5822      220821 :                                          static_cast<uint32_t>(offset));
    5823      220821 : }
    5824             : 
    5825           0 : void Heap::MarkingBarrierSlow(HeapObject object, Address slot,
    5826             :                               HeapObject value) {
    5827             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5828   120256400 :   heap->incremental_marking()->RecordWriteSlow(object, HeapObjectSlot(slot),
    5829   120256610 :                                                value);
    5830           0 : }
    5831             : 
    5832       16170 : void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
    5833             :   IncrementalMarking::MarkingState* marking_state =
    5834             :       heap->incremental_marking()->marking_state();
    5835       16170 :   if (!marking_state->IsBlack(object)) {
    5836             :     marking_state->WhiteToGrey(object);
    5837             :     marking_state->GreyToBlack(object);
    5838             :   }
    5839       16170 :   heap->incremental_marking()->RevisitObject(object);
    5840       16170 : }
    5841             : 
    5842           0 : void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
    5843             :                                      HeapObject object) {
    5844             :   Heap* heap = Heap::FromWritableHeapObject(host);
    5845             :   DCHECK(heap->incremental_marking()->IsMarking());
    5846      276662 :   heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
    5847           0 : }
    5848             : 
    5849     8137074 : void Heap::MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
    5850             :                                                 HeapObject raw_descriptor_array,
    5851             :                                                 int number_of_own_descriptors) {
    5852             :   DCHECK(heap->incremental_marking()->IsMarking());
    5853             :   DescriptorArray descriptor_array =
    5854             :       DescriptorArray::cast(raw_descriptor_array);
    5855             :   int16_t raw_marked = descriptor_array->raw_number_of_marked_descriptors();
    5856     8137074 :   if (NumberOfMarkedDescriptors::decode(heap->mark_compact_collector()->epoch(),
    5857     8137074 :                                         raw_marked) <
    5858             :       number_of_own_descriptors) {
    5859             :     heap->incremental_marking()->VisitDescriptors(host, descriptor_array,
    5860     3455525 :                                                   number_of_own_descriptors);
    5861             :   }
    5862     8137078 : }
    5863             : 
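                     : // Consistency check: verifies that the full MemoryChunk and the trimmed-down
                     : // heap_internals::MemoryChunk view of the same page agree on young-generation
                     : // membership and on the incremental-marking flag.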
    5864           0 : bool Heap::PageFlagsAreConsistent(HeapObject object) {
    5865             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5866             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5867             :   heap_internals::MemoryChunk* slim_chunk =
    5868             :       heap_internals::MemoryChunk::FromHeapObject(object);
    5869             : 
    5870             :   const bool generation_consistency =
    5871           0 :       chunk->owner()->identity() != NEW_SPACE ||
    5872           0 :       (chunk->InYoungGeneration() && slim_chunk->InYoungGeneration());
    5873             :   const bool marking_consistency =
    5874           0 :       !heap->incremental_marking()->IsMarking() ||
    5875           0 :       (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
    5876             :        slim_chunk->IsMarking());
    5877             : 
    5878           0 :   return generation_consistency && marking_consistency;
    5879             : }
    5880             : 
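                     : // These asserts keep the constants of the minimal heap_internals::MemoryChunk
                     : // view (marking bit, from/to-page bits, flags/heap/owner offsets) in sync
                     : // with the full MemoryChunk definition that the slow paths above rely on.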
    5881             : static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
    5882             :                   heap_internals::MemoryChunk::kMarkingBit,
    5883             :               "Incremental marking flag inconsistent");
    5884             : static_assert(MemoryChunk::Flag::FROM_PAGE ==
    5885             :                   heap_internals::MemoryChunk::kFromPageBit,
    5886             :               "From page flag inconsistent");
    5887             : static_assert(MemoryChunk::Flag::TO_PAGE ==
    5888             :                   heap_internals::MemoryChunk::kToPageBit,
    5889             :               "To page flag inconsistent");
    5890             : static_assert(MemoryChunk::kFlagsOffset ==
    5891             :                   heap_internals::MemoryChunk::kFlagsOffset,
    5892             :               "Flag offset inconsistent");
    5893             : static_assert(MemoryChunk::kHeapOffset ==
    5894             :                   heap_internals::MemoryChunk::kHeapOffset,
    5895             :               "Heap offset inconsistent");
    5896             : static_assert(MemoryChunk::kOwnerOffset ==
    5897             :                   heap_internals::MemoryChunk::kOwnerOffset,
    5898             :               "Owner offset inconsistent");
    5899             : 
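                     : // Forwards the embedder-provided stack state to the local embedder heap
                     : // tracer, to be used for the next finalization.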
    5900           5 : void Heap::SetEmbedderStackStateForNextFinalizaton(
    5901             :     EmbedderHeapTracer::EmbedderStackState stack_state) {
    5902             :   local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
    5903           5 :       stack_state);
    5904           5 : }
    5905             : 
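                     : // Debug-only bookkeeping: bumps the per-isolate counters of objects allocated
                     : // since the last full GC and since the last young GC.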
    5906             : #ifdef DEBUG
    5907             : void Heap::IncrementObjectCounters() {
    5908             :   isolate_->counters()->objs_since_last_full()->Increment();
    5909             :   isolate_->counters()->objs_since_last_young()->Increment();
    5910             : }
    5911             : #endif  // DEBUG
    5912             : 
    5913             : }  // namespace internal
    5914      120216 : }  // namespace v8

Generated by: LCOV version 1.10