LCOV - code coverage report
Current view: top level - src/heap - heap.cc (source / functions)
Test: app.info
Date: 2019-02-19
                 Hit    Total   Coverage
  Lines:        1759     2273     77.4 %
  Functions:     241      321     75.1 %

          Line data    Source code
       1             : // Copyright 2012 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/heap.h"
       6             : 
       7             : #include <unordered_map>
       8             : #include <unordered_set>
       9             : 
      10             : #include "src/accessors.h"
      11             : #include "src/api-inl.h"
      12             : #include "src/assembler-inl.h"
      13             : #include "src/base/bits.h"
      14             : #include "src/base/once.h"
      15             : #include "src/base/utils/random-number-generator.h"
      16             : #include "src/bootstrapper.h"
      17             : #include "src/compilation-cache.h"
      18             : #include "src/conversions.h"
      19             : #include "src/debug/debug.h"
      20             : #include "src/deoptimizer.h"
      21             : #include "src/feedback-vector.h"
      22             : #include "src/global-handles.h"
      23             : #include "src/heap/array-buffer-collector.h"
      24             : #include "src/heap/array-buffer-tracker-inl.h"
      25             : #include "src/heap/barrier.h"
      26             : #include "src/heap/code-stats.h"
      27             : #include "src/heap/concurrent-marking.h"
      28             : #include "src/heap/embedder-tracing.h"
      29             : #include "src/heap/gc-idle-time-handler.h"
      30             : #include "src/heap/gc-tracer.h"
      31             : #include "src/heap/heap-controller.h"
      32             : #include "src/heap/heap-write-barrier-inl.h"
      33             : #include "src/heap/incremental-marking.h"
      34             : #include "src/heap/mark-compact-inl.h"
      35             : #include "src/heap/mark-compact.h"
      36             : #include "src/heap/memory-reducer.h"
      37             : #include "src/heap/object-stats.h"
      38             : #include "src/heap/objects-visiting-inl.h"
      39             : #include "src/heap/objects-visiting.h"
      40             : #include "src/heap/remembered-set.h"
      41             : #include "src/heap/scavenge-job.h"
      42             : #include "src/heap/scavenger-inl.h"
      43             : #include "src/heap/store-buffer.h"
      44             : #include "src/heap/stress-marking-observer.h"
      45             : #include "src/heap/stress-scavenge-observer.h"
      46             : #include "src/heap/sweeper.h"
      47             : #include "src/interpreter/interpreter.h"
      48             : #include "src/log.h"
      49             : #include "src/microtask-queue.h"
      50             : #include "src/objects/data-handler.h"
      51             : #include "src/objects/free-space-inl.h"
      52             : #include "src/objects/hash-table-inl.h"
      53             : #include "src/objects/maybe-object.h"
      54             : #include "src/objects/shared-function-info.h"
      55             : #include "src/objects/slots-inl.h"
      56             : #include "src/regexp/jsregexp.h"
      57             : #include "src/runtime-profiler.h"
      58             : #include "src/snapshot/embedded-data.h"
      59             : #include "src/snapshot/natives.h"
      60             : #include "src/snapshot/serializer-common.h"
      61             : #include "src/snapshot/snapshot.h"
      62             : #include "src/string-stream.h"
      63             : #include "src/tracing/trace-event.h"
      64             : #include "src/unicode-decoder.h"
      65             : #include "src/unicode-inl.h"
      66             : #include "src/utils-inl.h"
      67             : #include "src/utils.h"
      68             : #include "src/v8.h"
      69             : #include "src/v8threads.h"
      70             : #include "src/vm-state-inl.h"
      71             : 
      72             : // Has to be the last include (doesn't have include guards):
      73             : #include "src/objects/object-macros.h"
      74             : 
      75             : namespace v8 {
      76             : namespace internal {
      77             : 
      78             : // These are outside the Heap class so they can be forward-declared
      79             : // in heap-write-barrier-inl.h.
      80           0 : bool Heap_PageFlagsAreConsistent(HeapObject object) {
      81           0 :   return Heap::PageFlagsAreConsistent(object);
      82             : }
      83             : 
      84   111939469 : void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
      85             :                                   HeapObject value) {
      86             :   Heap::GenerationalBarrierSlow(object, slot, value);
      87   111939466 : }
      88             : 
      89   285501829 : void Heap_MarkingBarrierSlow(HeapObject object, Address slot,
      90             :                              HeapObject value) {
      91             :   Heap::MarkingBarrierSlow(object, slot, value);
      92   285501762 : }
      93             : 
      94          40 : void Heap_WriteBarrierForCodeSlow(Code host) {
      95          40 :   Heap::WriteBarrierForCodeSlow(host);
      96          40 : }
      97             : 
      98      217505 : void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
      99             :                                          HeapObject object) {
     100      217510 :   Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
     101      217505 : }
     102             : 
     103      291888 : void Heap_MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
     104             :                                     HeapObject object) {
     105             :   Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
     106      291888 : }
     107             : 
     108         963 : void Heap_GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
     109             :                                              int offset, int length) {
     110         984 :   Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
     111         963 : }
     112             : 
     113         423 : void Heap_MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
     114         535 :   Heap::MarkingBarrierForElementsSlow(heap, object);
     115         423 : }
     116             : 
     117     9252427 : void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
     118             :                                                HeapObject descriptor_array,
     119             :                                                int number_of_own_descriptors) {
     120             :   Heap::MarkingBarrierForDescriptorArraySlow(heap, host, descriptor_array,
     121     9252427 :                                              number_of_own_descriptors);
     122     9252429 : }
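// [Editor's note] Illustrative sketch, not part of heap.cc: the Heap_*Slow
// functions above exist as free functions so that heap-write-barrier-inl.h can
// forward-declare them without including heap.h; each definition simply
// forwards to the corresponding static Heap method. The Example* names below
// are hypothetical and only demonstrate the shape of that pattern.
namespace example {
struct ExampleHeap {
  // Stands in for a static slow-path method such as Heap::MarkingBarrierSlow.
  static void BarrierSlow(void* object) { (void)object; }
};
// Declared (by assumption) in a widely included inline header, where only the
// plain signature is needed; defined next to ExampleHeap, which is visible here.
inline void Example_BarrierSlow(void* object) { ExampleHeap::BarrierSlow(object); }
}  // namespace example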
     123             : 
     124          56 : void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
     125             :   DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
     126          56 :   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
     127          56 : }
     128             : 
     129          56 : void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
     130             :   DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
     131          56 :   set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
     132          56 : }
     133             : 
     134          56 : void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
     135             :   DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
     136          56 :   set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
     137          56 : }
     138             : 
     139          56 : void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
     140             :   DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
     141          56 :   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
     142          56 : }
     143             : 
     144         241 : void Heap::SetSerializedObjects(FixedArray objects) {
     145             :   DCHECK(isolate()->serializer_enabled());
     146         241 :   set_serialized_objects(objects);
     147         241 : }
     148             : 
     149         191 : void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
     150             :   DCHECK(isolate()->serializer_enabled());
     151         191 :   set_serialized_global_proxy_sizes(sizes);
     152         191 : }
     153             : 
     154           0 : bool Heap::GCCallbackTuple::operator==(
     155             :     const Heap::GCCallbackTuple& other) const {
     156           0 :   return other.callback == callback && other.data == data;
     157             : }
     158             : 
     159             : Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
     160             :     const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
     161             : 
     162             : struct Heap::StrongRootsList {
     163             :   FullObjectSlot start;
     164             :   FullObjectSlot end;
     165             :   StrongRootsList* next;
     166             : };
     167             : 
     168      122068 : class IdleScavengeObserver : public AllocationObserver {
     169             :  public:
     170             :   IdleScavengeObserver(Heap& heap, intptr_t step_size)
     171       61049 :       : AllocationObserver(step_size), heap_(heap) {}
     172             : 
     173       32825 :   void Step(int bytes_allocated, Address, size_t) override {
     174       32825 :     heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
     175       32825 :   }
     176             : 
     177             :  private:
     178             :   Heap& heap_;
     179             : };
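// [Editor's note] Minimal self-contained sketch, not part of heap.cc, of the
// allocation-observer protocol that IdleScavengeObserver implements above: the
// owning space invokes Step() roughly once every |step_size| allocated bytes.
// ExampleObserver and the driver loop below are hypothetical illustrations.
#include <cstdint>
namespace example {
class ExampleObserver {
 public:
  explicit ExampleObserver(intptr_t step_size) : step_size_(step_size) {}
  intptr_t step_size() const { return step_size_; }
  // Called by the space after roughly step_size_ bytes have been allocated.
  void Step(int bytes_allocated) { total_observed_ += bytes_allocated; }
 private:
  intptr_t step_size_;
  int64_t total_observed_ = 0;
};
// Mimics how a space might batch allocations and notify its observer.
inline void SimulateAllocations(ExampleObserver& observer, int allocation_size,
                                int count) {
  intptr_t since_last_step = 0;
  for (int i = 0; i < count; ++i) {
    since_last_step += allocation_size;
    if (since_last_step >= observer.step_size()) {
      observer.Step(static_cast<int>(since_last_step));
      since_last_step = 0;
    }
  }
}
}  // namespace example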
     180             : 
     181       61049 : Heap::Heap()
     182             :     : isolate_(isolate()),
     183             :       initial_max_old_generation_size_(max_old_generation_size_),
     184             :       initial_max_old_generation_size_threshold_(0),
     185             :       initial_old_generation_size_(max_old_generation_size_ /
     186             :                                    kInitalOldGenerationLimitFactor),
     187             :       memory_pressure_level_(MemoryPressureLevel::kNone),
     188             :       old_generation_allocation_limit_(initial_old_generation_size_),
     189             :       global_pretenuring_feedback_(kInitialFeedbackCapacity),
     190             :       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
     191             :       is_current_gc_forced_(false),
     192      488392 :       external_string_table_(this) {
     193             :   // Ensure old_generation_size_ is a multiple of kPageSize.
     194             :   DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
     195             : 
     196             :   set_native_contexts_list(Smi::kZero);
     197             :   set_allocation_sites_list(Smi::kZero);
      198             :   // Put a dummy entry in the remembered pages so we can find the list in
      199             :   // the minidump even if there are no real unmapped pages.
     200             :   RememberUnmappedPage(kNullAddress, false);
     201       61049 : }
     202             : 
     203             : Heap::~Heap() = default;
     204             : 
     205        1261 : size_t Heap::MaxReserved() {
     206     2635875 :   const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
     207     2635875 :   return static_cast<size_t>(2 * max_semi_space_size_ +
     208     2635875 :                              kMaxNewLargeObjectSpaceSize +
     209     2635875 :                              max_old_generation_size_);
     210             : }
     211             : 
     212       29552 : size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
     213             :   const size_t old_space_physical_memory_factor = 4;
     214       29552 :   size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
     215             :                                              old_space_physical_memory_factor *
     216       29552 :                                              kPointerMultiplier);
     217             :   return Max(Min(computed_size, HeapController::kMaxSize),
     218       29552 :              HeapController::kMinSize);
     219             : }
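// [Editor's note] A standalone sketch, not part of heap.cc, of the sizing
// heuristic above: roughly a quarter of physical memory (in MB, scaled by the
// pointer multiplier) clamped to the heap controller's bounds. kMinSizeMB and
// kMaxSizeMB below are assumed stand-ins for HeapController::kMinSize/kMaxSize,
// whose real values are not shown here.
#include <algorithm>
#include <cstddef>
#include <cstdint>
namespace example {
constexpr size_t kExampleMB = 1024 * 1024;
constexpr size_t kMinSizeMB = 128;   // assumed stand-in for HeapController::kMinSize
constexpr size_t kMaxSizeMB = 1024;  // assumed stand-in for HeapController::kMaxSize
inline size_t ComputeMaxOldGenerationSizeMB(uint64_t physical_memory) {
  const size_t old_space_physical_memory_factor = 4;
  size_t computed = static_cast<size_t>(
      physical_memory / kExampleMB / old_space_physical_memory_factor);
  return std::max(std::min(computed, kMaxSizeMB), kMinSizeMB);
}
}  // namespace example
// e.g. 8 GiB of RAM -> 8192 MB / 4 = 2048 MB, clamped here to the assumed 1024 MB cap.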
     220             : 
     221          60 : size_t Heap::Capacity() {
     222          60 :   if (!HasBeenSetUp()) return 0;
     223             : 
     224          60 :   return new_space_->Capacity() + OldGenerationCapacity();
     225             : }
     226             : 
     227     2719669 : size_t Heap::OldGenerationCapacity() {
     228     2719669 :   if (!HasBeenSetUp()) return 0;
     229             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
     230             :   size_t total = 0;
     231    13598340 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     232             :        space = spaces.next()) {
     233    10878671 :     total += space->Capacity();
     234             :   }
     235     2719671 :   return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
     236             : }
     237             : 
     238      710729 : size_t Heap::CommittedOldGenerationMemory() {
     239      710729 :   if (!HasBeenSetUp()) return 0;
     240             : 
     241             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
     242             :   size_t total = 0;
     243     3553649 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     244             :        space = spaces.next()) {
     245     2842919 :     total += space->CommittedMemory();
     246             :   }
     247      710730 :   return total + lo_space_->Size() + code_lo_space_->Size();
     248             : }
     249             : 
     250           0 : size_t Heap::CommittedMemoryOfUnmapper() {
     251           0 :   if (!HasBeenSetUp()) return 0;
     252             : 
     253           0 :   return memory_allocator()->unmapper()->CommittedBufferedMemory();
     254             : }
     255             : 
     256      561673 : size_t Heap::CommittedMemory() {
     257      561673 :   if (!HasBeenSetUp()) return 0;
     258             : 
     259      561673 :   return new_space_->CommittedMemory() + new_lo_space_->Size() +
     260      561672 :          CommittedOldGenerationMemory();
     261             : }
     262             : 
     263             : 
     264         246 : size_t Heap::CommittedPhysicalMemory() {
     265         246 :   if (!HasBeenSetUp()) return 0;
     266             : 
     267             :   size_t total = 0;
     268        2214 :   for (SpaceIterator it(this); it.has_next();) {
     269        1968 :     total += it.next()->CommittedPhysicalMemory();
     270             :   }
     271             : 
     272         246 :   return total;
     273             : }
     274             : 
     275      128799 : size_t Heap::CommittedMemoryExecutable() {
     276      128799 :   if (!HasBeenSetUp()) return 0;
     277             : 
     278      128799 :   return static_cast<size_t>(memory_allocator()->SizeExecutable());
     279             : }
     280             : 
     281             : 
     282      257034 : void Heap::UpdateMaximumCommitted() {
     283      514068 :   if (!HasBeenSetUp()) return;
     284             : 
     285      257034 :   const size_t current_committed_memory = CommittedMemory();
     286      257034 :   if (current_committed_memory > maximum_committed_) {
     287       93828 :     maximum_committed_ = current_committed_memory;
     288             :   }
     289             : }
     290             : 
     291         306 : size_t Heap::Available() {
     292         306 :   if (!HasBeenSetUp()) return 0;
     293             : 
     294             :   size_t total = 0;
     295             : 
     296        2754 :   for (SpaceIterator it(this); it.has_next();) {
     297        2448 :     total += it.next()->Available();
     298             :   }
     299             : 
     300         306 :   total += memory_allocator()->Available();
     301         306 :   return total;
     302             : }
     303             : 
     304     5151615 : bool Heap::CanExpandOldGeneration(size_t size) {
     305     2575924 :   if (force_oom_) return false;
     306     5151380 :   if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
      307             :   // The OldGenerationCapacity does not account for compaction spaces used
      308             :   // during evacuation. Ensure that expanding the old generation does not
      309             :   // push the total allocated memory size over the maximum heap size.
     310     5147130 :   return memory_allocator()->Size() + size <= MaxReserved();
     311             : }
     312             : 
     313          15 : bool Heap::HasBeenSetUp() {
     314             :   // We will always have a new space when the heap is set up.
     315     6315919 :   return new_space_ != nullptr;
     316             : }
     317             : 
     318             : 
     319       98000 : GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     320       23495 :                                               const char** reason) {
     321             :   // Is global GC requested?
     322       98000 :   if (space != NEW_SPACE && space != NEW_LO_SPACE) {
     323      147626 :     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
     324       73813 :     *reason = "GC in old space requested";
     325       73813 :     return MARK_COMPACTOR;
     326             :   }
     327             : 
     328       24187 :   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
     329         691 :     *reason = "GC in old space forced by flags";
     330         691 :     return MARK_COMPACTOR;
     331             :   }
     332             : 
     333       23855 :   if (incremental_marking()->NeedsFinalization() &&
     334         359 :       AllocationLimitOvershotByLargeMargin()) {
     335           1 :     *reason = "Incremental marking needs finalization";
     336           1 :     return MARK_COMPACTOR;
     337             :   }
     338             : 
     339             :   // Over-estimate the new space size using capacity to allow some slack.
     340       46990 :   if (!CanExpandOldGeneration(new_space_->TotalCapacity() +
     341       23495 :                               new_lo_space()->Size())) {
     342             :     isolate_->counters()
     343             :         ->gc_compactor_caused_by_oldspace_exhaustion()
     344          10 :         ->Increment();
     345           5 :     *reason = "scavenge might not succeed";
     346           5 :     return MARK_COMPACTOR;
     347             :   }
     348             : 
     349             :   // Default
     350       23490 :   *reason = nullptr;
     351       23490 :   return YoungGenerationCollector();
     352             : }
     353             : 
     354           0 : void Heap::SetGCState(HeapState state) {
     355      257034 :   gc_state_ = state;
     356           0 : }
     357             : 
     358          35 : void Heap::PrintShortHeapStatistics() {
     359          70 :   if (!FLAG_trace_gc_verbose) return;
     360             :   PrintIsolate(isolate_,
     361             :                "Memory allocator,       used: %6" PRIuS
     362             :                " KB,"
     363             :                " available: %6" PRIuS " KB\n",
     364             :                memory_allocator()->Size() / KB,
     365           0 :                memory_allocator()->Available() / KB);
     366             :   PrintIsolate(isolate_,
     367             :                "Read-only space,        used: %6" PRIuS
     368             :                " KB"
     369             :                ", available: %6" PRIuS
     370             :                " KB"
     371             :                ", committed: %6" PRIuS " KB\n",
     372           0 :                read_only_space_->Size() / KB,
     373           0 :                read_only_space_->Available() / KB,
     374           0 :                read_only_space_->CommittedMemory() / KB);
     375             :   PrintIsolate(isolate_,
     376             :                "New space,              used: %6" PRIuS
     377             :                " KB"
     378             :                ", available: %6" PRIuS
     379             :                " KB"
     380             :                ", committed: %6" PRIuS " KB\n",
     381           0 :                new_space_->Size() / KB, new_space_->Available() / KB,
     382           0 :                new_space_->CommittedMemory() / KB);
     383             :   PrintIsolate(isolate_,
     384             :                "New large object space, used: %6" PRIuS
     385             :                " KB"
     386             :                ", available: %6" PRIuS
     387             :                " KB"
     388             :                ", committed: %6" PRIuS " KB\n",
     389           0 :                new_lo_space_->SizeOfObjects() / KB,
     390           0 :                new_lo_space_->Available() / KB,
     391           0 :                new_lo_space_->CommittedMemory() / KB);
     392             :   PrintIsolate(isolate_,
     393             :                "Old space,              used: %6" PRIuS
     394             :                " KB"
     395             :                ", available: %6" PRIuS
     396             :                " KB"
     397             :                ", committed: %6" PRIuS " KB\n",
     398           0 :                old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
     399           0 :                old_space_->CommittedMemory() / KB);
     400             :   PrintIsolate(isolate_,
     401             :                "Code space,             used: %6" PRIuS
     402             :                " KB"
     403             :                ", available: %6" PRIuS
     404             :                " KB"
      405             :                ", committed: %6" PRIuS " KB\n",
     406           0 :                code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
     407           0 :                code_space_->CommittedMemory() / KB);
     408             :   PrintIsolate(isolate_,
     409             :                "Map space,              used: %6" PRIuS
     410             :                " KB"
     411             :                ", available: %6" PRIuS
     412             :                " KB"
     413             :                ", committed: %6" PRIuS " KB\n",
     414           0 :                map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
     415           0 :                map_space_->CommittedMemory() / KB);
     416             :   PrintIsolate(isolate_,
     417             :                "Large object space,     used: %6" PRIuS
     418             :                " KB"
     419             :                ", available: %6" PRIuS
     420             :                " KB"
     421             :                ", committed: %6" PRIuS " KB\n",
     422           0 :                lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
     423           0 :                lo_space_->CommittedMemory() / KB);
     424             :   PrintIsolate(isolate_,
     425             :                "Code large object space,     used: %6" PRIuS
     426             :                " KB"
     427             :                ", available: %6" PRIuS
     428             :                " KB"
     429             :                ", committed: %6" PRIuS " KB\n",
      430           0 :                code_lo_space_->SizeOfObjects() / KB,
     431           0 :                code_lo_space_->Available() / KB,
     432           0 :                code_lo_space_->CommittedMemory() / KB);
     433             :   PrintIsolate(isolate_,
     434             :                "All spaces,             used: %6" PRIuS
     435             :                " KB"
     436             :                ", available: %6" PRIuS
     437             :                " KB"
      438             :                ", committed: %6" PRIuS " KB\n",
     439           0 :                this->SizeOfObjects() / KB, this->Available() / KB,
     440           0 :                this->CommittedMemory() / KB);
     441             :   PrintIsolate(isolate_,
     442             :                "Unmapper buffering %zu chunks of committed: %6" PRIuS " KB\n",
     443             :                memory_allocator()->unmapper()->NumberOfCommittedChunks(),
     444           0 :                CommittedMemoryOfUnmapper() / KB);
     445             :   PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
     446           0 :                isolate()->isolate_data()->external_memory_ / KB);
     447             :   PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
     448           0 :                backing_store_bytes_ / KB);
     449             :   PrintIsolate(isolate_, "External memory global %zu KB\n",
     450           0 :                external_memory_callback_() / KB);
     451             :   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
     452           0 :                total_gc_time_ms_);
     453             : }
     454             : 
     455           0 : void Heap::ReportStatisticsAfterGC() {
     456           0 :   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
     457             :        ++i) {
     458           0 :     int count = deferred_counters_[i];
     459           0 :     deferred_counters_[i] = 0;
     460           0 :     while (count > 0) {
     461           0 :       count--;
     462           0 :       isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
     463             :     }
     464             :   }
     465           0 : }
     466             : 
     467        8104 : void Heap::AddHeapObjectAllocationTracker(
     468             :     HeapObjectAllocationTracker* tracker) {
     469        8104 :   if (allocation_trackers_.empty()) DisableInlineAllocation();
     470        8104 :   allocation_trackers_.push_back(tracker);
     471        8104 : }
     472             : 
     473        8100 : void Heap::RemoveHeapObjectAllocationTracker(
     474             :     HeapObjectAllocationTracker* tracker) {
     475             :   allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
     476             :                                          allocation_trackers_.end(), tracker),
     477        8100 :                              allocation_trackers_.end());
     478        8100 :   if (allocation_trackers_.empty()) EnableInlineAllocation();
     479        8100 : }
     480             : 
     481           0 : void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
     482             :                                   RetainingPathOption option) {
     483           0 :   if (!FLAG_track_retaining_path) {
     484           0 :     PrintF("Retaining path tracking requires --track-retaining-path\n");
     485             :   } else {
     486           0 :     Handle<WeakArrayList> array(retaining_path_targets(), isolate());
     487           0 :     int index = array->length();
     488             :     array = WeakArrayList::AddToEnd(isolate(), array,
     489           0 :                                     MaybeObjectHandle::Weak(object));
     490           0 :     set_retaining_path_targets(*array);
     491             :     DCHECK_EQ(array->length(), index + 1);
     492           0 :     retaining_path_target_option_[index] = option;
     493             :   }
     494           0 : }
     495             : 
     496           0 : bool Heap::IsRetainingPathTarget(HeapObject object,
     497             :                                  RetainingPathOption* option) {
     498           0 :   WeakArrayList targets = retaining_path_targets();
     499             :   int length = targets->length();
     500             :   MaybeObject object_to_check = HeapObjectReference::Weak(object);
     501           0 :   for (int i = 0; i < length; i++) {
     502           0 :     MaybeObject target = targets->Get(i);
     503             :     DCHECK(target->IsWeakOrCleared());
     504           0 :     if (target == object_to_check) {
     505             :       DCHECK(retaining_path_target_option_.count(i));
     506           0 :       *option = retaining_path_target_option_[i];
     507           0 :       return true;
     508             :     }
     509             :   }
     510           0 :   return false;
     511             : }
     512             : 
     513           0 : void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
     514           0 :   PrintF("\n\n\n");
     515           0 :   PrintF("#################################################\n");
     516           0 :   PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target->ptr()));
     517           0 :   HeapObject object = target;
     518             :   std::vector<std::pair<HeapObject, bool>> retaining_path;
     519             :   Root root = Root::kUnknown;
     520             :   bool ephemeron = false;
     521             :   while (true) {
     522           0 :     retaining_path.push_back(std::make_pair(object, ephemeron));
     523           0 :     if (option == RetainingPathOption::kTrackEphemeronPath &&
     524             :         ephemeron_retainer_.count(object)) {
     525           0 :       object = ephemeron_retainer_[object];
     526             :       ephemeron = true;
     527           0 :     } else if (retainer_.count(object)) {
     528           0 :       object = retainer_[object];
     529             :       ephemeron = false;
     530             :     } else {
     531           0 :       if (retaining_root_.count(object)) {
     532           0 :         root = retaining_root_[object];
     533             :       }
     534             :       break;
     535             :     }
     536             :   }
     537           0 :   int distance = static_cast<int>(retaining_path.size());
     538           0 :   for (auto node : retaining_path) {
     539           0 :     HeapObject object = node.first;
     540           0 :     bool ephemeron = node.second;
     541           0 :     PrintF("\n");
     542           0 :     PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
     543             :     PrintF("Distance from root %d%s: ", distance,
     544           0 :            ephemeron ? " (ephemeron)" : "");
     545           0 :     object->ShortPrint();
     546           0 :     PrintF("\n");
     547             : #ifdef OBJECT_PRINT
     548             :     object->Print();
     549             :     PrintF("\n");
     550             : #endif
     551           0 :     --distance;
     552             :   }
     553           0 :   PrintF("\n");
     554           0 :   PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
     555           0 :   PrintF("Root: %s\n", RootVisitor::RootName(root));
     556           0 :   PrintF("-------------------------------------------------\n");
     557           0 : }
     558             : 
     559           0 : void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
     560           0 :   if (retainer_.count(object)) return;
     561           0 :   retainer_[object] = retainer;
     562           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     563           0 :   if (IsRetainingPathTarget(object, &option)) {
     564             :     // Check if the retaining path was already printed in
     565             :     // AddEphemeronRetainer().
     566           0 :     if (ephemeron_retainer_.count(object) == 0 ||
     567           0 :         option == RetainingPathOption::kDefault) {
     568           0 :       PrintRetainingPath(object, option);
     569             :     }
     570             :   }
     571             : }
     572             : 
     573           0 : void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
     574           0 :   if (ephemeron_retainer_.count(object)) return;
     575           0 :   ephemeron_retainer_[object] = retainer;
     576           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     577           0 :   if (IsRetainingPathTarget(object, &option) &&
     578           0 :       option == RetainingPathOption::kTrackEphemeronPath) {
     579             :     // Check if the retaining path was already printed in AddRetainer().
     580           0 :     if (retainer_.count(object) == 0) {
     581           0 :       PrintRetainingPath(object, option);
     582             :     }
     583             :   }
     584             : }
     585             : 
     586           0 : void Heap::AddRetainingRoot(Root root, HeapObject object) {
     587           0 :   if (retaining_root_.count(object)) return;
     588           0 :   retaining_root_[object] = root;
     589           0 :   RetainingPathOption option = RetainingPathOption::kDefault;
     590           0 :   if (IsRetainingPathTarget(object, &option)) {
     591           0 :     PrintRetainingPath(object, option);
     592             :   }
     593             : }
     594             : 
     595           0 : void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
     596           0 :   deferred_counters_[feature]++;
     597           0 : }
     598             : 
     599       24459 : bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
     600             : 
     601       98000 : void Heap::GarbageCollectionPrologue() {
     602      392000 :   TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
     603             :   {
     604             :     AllowHeapAllocation for_the_first_part_of_prologue;
     605       98000 :     gc_count_++;
     606             : 
     607             : #ifdef VERIFY_HEAP
     608             :     if (FLAG_verify_heap) {
     609             :       Verify();
     610             :     }
     611             : #endif
     612             :   }
     613             : 
     614             :   // Reset GC statistics.
     615       98000 :   promoted_objects_size_ = 0;
     616       98000 :   previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
     617       98000 :   semi_space_copied_object_size_ = 0;
     618       98000 :   nodes_died_in_new_space_ = 0;
     619       98000 :   nodes_copied_in_new_space_ = 0;
     620       98000 :   nodes_promoted_ = 0;
     621             : 
     622       98000 :   UpdateMaximumCommitted();
     623             : 
     624             : #ifdef DEBUG
     625             :   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
     626             : 
     627             :   if (FLAG_gc_verbose) Print();
     628             : #endif  // DEBUG
     629             : 
     630      196000 :   if (new_space_->IsAtMaximumCapacity()) {
     631        3023 :     maximum_size_scavenges_++;
     632             :   } else {
     633       94977 :     maximum_size_scavenges_ = 0;
     634             :   }
     635       98000 :   CheckNewSpaceExpansionCriteria();
     636             :   UpdateNewSpaceAllocationCounter();
     637       98000 :   if (FLAG_track_retaining_path) {
     638             :     retainer_.clear();
     639             :     ephemeron_retainer_.clear();
     640             :     retaining_root_.clear();
     641       98000 :   }
     642       98000 : }
     643             : 
     644      196846 : size_t Heap::SizeOfObjects() {
     645             :   size_t total = 0;
     646             : 
     647     5831820 :   for (SpaceIterator it(this); it.has_next();) {
     648     5183840 :     total += it.next()->SizeOfObjects();
     649             :   }
     650      196846 :   return total;
     651             : }
     652             : 
     653             : 
     654          40 : const char* Heap::GetSpaceName(int idx) {
     655          40 :   switch (idx) {
     656             :     case NEW_SPACE:
     657             :       return "new_space";
     658             :     case OLD_SPACE:
     659           5 :       return "old_space";
     660             :     case MAP_SPACE:
     661           5 :       return "map_space";
     662             :     case CODE_SPACE:
     663           5 :       return "code_space";
     664             :     case LO_SPACE:
     665           5 :       return "large_object_space";
     666             :     case NEW_LO_SPACE:
     667           5 :       return "new_large_object_space";
     668             :     case CODE_LO_SPACE:
     669           5 :       return "code_large_object_space";
     670             :     case RO_SPACE:
     671           5 :       return "read_only_space";
     672             :     default:
     673           0 :       UNREACHABLE();
     674             :   }
     675             :   return nullptr;
     676             : }
     677             : 
     678      115104 : void Heap::MergeAllocationSitePretenuringFeedback(
     679             :     const PretenuringFeedbackMap& local_pretenuring_feedback) {
     680      115104 :   AllocationSite site;
     681      312431 :   for (auto& site_and_count : local_pretenuring_feedback) {
     682       82223 :     site = site_and_count.first;
     683             :     MapWord map_word = site_and_count.first->map_word();
     684       82223 :     if (map_word.IsForwardingAddress()) {
     685         530 :       site = AllocationSite::cast(map_word.ToForwardingAddress());
     686             :     }
     687             : 
     688             :     // We have not validated the allocation site yet, since we have not
      689             :     // dereferenced the site while collecting the information.
     690             :     // This is an inlined check of AllocationMemento::IsValid.
     691      164446 :     if (!site->IsAllocationSite() || site->IsZombie()) continue;
     692             : 
     693       82172 :     const int value = static_cast<int>(site_and_count.second);
     694             :     DCHECK_LT(0, value);
     695       82172 :     if (site->IncrementMementoFoundCount(value)) {
     696             :       // For sites in the global map the count is accessed through the site.
     697        3108 :       global_pretenuring_feedback_.insert(std::make_pair(site, 0));
     698             :     }
     699             :   }
     700      115104 : }
     701             : 
     702       30469 : void Heap::AddAllocationObserversToAllSpaces(
     703      243752 :     AllocationObserver* observer, AllocationObserver* new_space_observer) {
     704             :   DCHECK(observer && new_space_observer);
     705             : 
     706      274221 :   for (SpaceIterator it(this); it.has_next();) {
     707             :     Space* space = it.next();
     708      243752 :     if (space == new_space()) {
     709       30469 :       space->AddAllocationObserver(new_space_observer);
     710             :     } else {
     711      213283 :       space->AddAllocationObserver(observer);
     712             :     }
     713             :   }
     714       30469 : }
     715             : 
     716          59 : void Heap::RemoveAllocationObserversFromAllSpaces(
     717         472 :     AllocationObserver* observer, AllocationObserver* new_space_observer) {
     718             :   DCHECK(observer && new_space_observer);
     719             : 
     720         531 :   for (SpaceIterator it(this); it.has_next();) {
     721             :     Space* space = it.next();
     722         472 :     if (space == new_space()) {
     723          59 :       space->RemoveAllocationObserver(new_space_observer);
     724             :     } else {
     725         413 :       space->RemoveAllocationObserver(observer);
     726             :     }
     727             :   }
     728          59 : }
     729             : 
     730             : class Heap::SkipStoreBufferScope {
     731             :  public:
     732             :   explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
     733             :       : store_buffer_(store_buffer) {
     734       98000 :     store_buffer_->MoveAllEntriesToRememberedSet();
     735       98000 :     store_buffer_->SetMode(StoreBuffer::IN_GC);
     736             :   }
     737             : 
     738             :   ~SkipStoreBufferScope() {
     739             :     DCHECK(store_buffer_->Empty());
     740       98000 :     store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
     741             :   }
     742             : 
     743             :  private:
     744             :   StoreBuffer* store_buffer_;
     745             : };
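// [Editor's note] Minimal sketch, not part of heap.cc, of the RAII scope-guard
// technique SkipStoreBufferScope uses above: the constructor flushes pending
// entries and switches the buffer into its GC mode, and the destructor restores
// the normal mode on every exit path of the enclosing scope. ExampleBuffer and
// ExampleSkipScope are hypothetical stand-ins.
namespace example {
struct ExampleBuffer {
  enum Mode { NOT_IN_GC, IN_GC };
  Mode mode = NOT_IN_GC;
  void Flush() { /* move buffered entries to their long-term home */ }
};
class ExampleSkipScope {
 public:
  explicit ExampleSkipScope(ExampleBuffer* buffer) : buffer_(buffer) {
    buffer_->Flush();
    buffer_->mode = ExampleBuffer::IN_GC;
  }
  ~ExampleSkipScope() { buffer_->mode = ExampleBuffer::NOT_IN_GC; }
 private:
  ExampleBuffer* buffer_;
};
}  // namespace example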
     746             : 
     747             : namespace {
     748        1413 : inline bool MakePretenureDecision(
     749             :     AllocationSite site, AllocationSite::PretenureDecision current_decision,
     750             :     double ratio, bool maximum_size_scavenge) {
     751             :   // Here we just allow state transitions from undecided or maybe tenure
     752             :   // to don't tenure, maybe tenure, or tenure.
     753        2826 :   if ((current_decision == AllocationSite::kUndecided ||
     754        1413 :        current_decision == AllocationSite::kMaybeTenure)) {
     755         957 :     if (ratio >= AllocationSite::kPretenureRatio) {
     756             :       // We just transition into tenure state when the semi-space was at
     757             :       // maximum capacity.
     758         787 :       if (maximum_size_scavenge) {
     759             :         site->set_deopt_dependent_code(true);
     760             :         site->set_pretenure_decision(AllocationSite::kTenure);
     761             :         // Currently we just need to deopt when we make a state transition to
     762             :         // tenure.
     763          48 :         return true;
     764             :       }
     765             :       site->set_pretenure_decision(AllocationSite::kMaybeTenure);
     766             :     } else {
     767             :       site->set_pretenure_decision(AllocationSite::kDontTenure);
     768             :     }
     769             :   }
     770             :   return false;
     771             : }
     772             : 
     773        1413 : inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
     774             :                                       bool maximum_size_scavenge) {
     775             :   bool deopt = false;
     776             :   int create_count = site->memento_create_count();
     777             :   int found_count = site->memento_found_count();
     778             :   bool minimum_mementos_created =
     779        1413 :       create_count >= AllocationSite::kPretenureMinimumCreated;
     780           0 :   double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
     781        1413 :                      ? static_cast<double>(found_count) / create_count
     782        2826 :                      : 0.0;
     783             :   AllocationSite::PretenureDecision current_decision =
     784             :       site->pretenure_decision();
     785             : 
     786        1413 :   if (minimum_mementos_created) {
     787             :     deopt = MakePretenureDecision(site, current_decision, ratio,
     788        1413 :                                   maximum_size_scavenge);
     789             :   }
     790             : 
     791        1413 :   if (FLAG_trace_pretenuring_statistics) {
     792             :     PrintIsolate(isolate,
     793             :                  "pretenuring: AllocationSite(%p): (created, found, ratio) "
     794             :                  "(%d, %d, %f) %s => %s\n",
     795             :                  reinterpret_cast<void*>(site.ptr()), create_count, found_count,
     796             :                  ratio, site->PretenureDecisionName(current_decision),
     797           0 :                  site->PretenureDecisionName(site->pretenure_decision()));
     798             :   }
     799             : 
     800             :   // Clear feedback calculation fields until the next gc.
     801             :   site->set_memento_found_count(0);
     802             :   site->set_memento_create_count(0);
     803        1413 :   return deopt;
     804             : }
     805             : }  // namespace
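// [Editor's note] Self-contained sketch, not part of heap.cc, of the ratio test
// digested above. The two constants are assumptions standing in for
// AllocationSite::kPretenureMinimumCreated and AllocationSite::kPretenureRatio;
// the real code additionally requires a maximum-size scavenge before moving a
// site all the way to the kTenure decision.
namespace example {
constexpr int kExamplePretenureMinimumCreated = 100;  // assumed value
constexpr double kExamplePretenureRatio = 0.85;       // assumed value
// True when enough mementos were created and a large enough fraction of them
// was found behind live objects during the last scavenge.
inline bool ShouldConsiderTenuring(int create_count, int found_count) {
  if (create_count < kExamplePretenureMinimumCreated) return false;
  double ratio = static_cast<double>(found_count) / create_count;
  return ratio >= kExamplePretenureRatio;
}
}  // namespace example
// e.g. ShouldConsiderTenuring(120, 110): ratio = 110.0 / 120 = 0.92 >= 0.85, so the
// site is a tenuring candidate; with found_count = 60 the ratio is 0.5 and it is not.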
     806             : 
     807           0 : void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
     808             :   global_pretenuring_feedback_.erase(site);
     809           0 : }
     810             : 
     811           0 : bool Heap::DeoptMaybeTenuredAllocationSites() {
     812      196000 :   return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
     813             : }
     814             : 
     815      196000 : void Heap::ProcessPretenuringFeedback() {
     816       98000 :   bool trigger_deoptimization = false;
     817       98000 :   if (FLAG_allocation_site_pretenuring) {
     818             :     int tenure_decisions = 0;
     819             :     int dont_tenure_decisions = 0;
     820             :     int allocation_mementos_found = 0;
     821       98000 :     int allocation_sites = 0;
     822             :     int active_allocation_sites = 0;
     823             : 
     824       98000 :     AllocationSite site;
     825             : 
     826             :     // Step 1: Digest feedback for recorded allocation sites.
     827             :     bool maximum_size_scavenge = MaximumSizeScavenge();
     828      197413 :     for (auto& site_and_count : global_pretenuring_feedback_) {
     829        1413 :       allocation_sites++;
     830        1413 :       site = site_and_count.first;
     831             :       // Count is always access through the site.
     832             :       DCHECK_EQ(0, site_and_count.second);
     833             :       int found_count = site->memento_found_count();
     834             :       // An entry in the storage does not imply that the count is > 0 because
     835             :       // allocation sites might have been reset due to too many objects dying
     836             :       // in old space.
     837        1413 :       if (found_count > 0) {
     838             :         DCHECK(site->IsAllocationSite());
     839        1413 :         active_allocation_sites++;
     840        1413 :         allocation_mementos_found += found_count;
     841        1413 :         if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
     842          48 :           trigger_deoptimization = true;
     843             :         }
     844        1413 :         if (site->GetPretenureMode() == TENURED) {
     845          54 :           tenure_decisions++;
     846             :         } else {
     847        1359 :           dont_tenure_decisions++;
     848             :         }
     849             :       }
     850             :     }
     851             : 
     852             :     // Step 2: Deopt maybe tenured allocation sites if necessary.
     853             :     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
     854       98000 :     if (deopt_maybe_tenured) {
     855             :       ForeachAllocationSite(
     856             :           allocation_sites_list(),
     857             :           [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
     858             :             DCHECK(site->IsAllocationSite());
     859         302 :             allocation_sites++;
     860         302 :             if (site->IsMaybeTenure()) {
     861             :               site->set_deopt_dependent_code(true);
     862          19 :               trigger_deoptimization = true;
     863             :             }
     864         260 :           });
     865             :     }
     866             : 
     867       98000 :     if (trigger_deoptimization) {
     868          35 :       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
     869             :     }
     870             : 
     871       98000 :     if (FLAG_trace_pretenuring_statistics &&
     872           0 :         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
     873             :          dont_tenure_decisions > 0)) {
     874             :       PrintIsolate(isolate(),
     875             :                    "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
     876             :                    "active_sites=%d "
     877             :                    "mementos=%d tenured=%d not_tenured=%d\n",
     878             :                    deopt_maybe_tenured ? 1 : 0, allocation_sites,
     879             :                    active_allocation_sites, allocation_mementos_found,
     880           0 :                    tenure_decisions, dont_tenure_decisions);
     881             :     }
     882             : 
     883             :     global_pretenuring_feedback_.clear();
     884             :     global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
     885             :   }
     886       98000 : }
     887             : 
     888      305899 : void Heap::InvalidateCodeDeoptimizationData(Code code) {
     889             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(code);
     890      305899 :   CodePageMemoryModificationScope modification_scope(chunk);
     891      305899 :   code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
     892      305899 : }
     893             : 
     894          35 : void Heap::DeoptMarkedAllocationSites() {
     895             :   // TODO(hpayer): If iterating over the allocation sites list becomes a
     896             :   // performance issue, use a cache data structure in heap instead.
     897             : 
     898         153 :   ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) {
     899         153 :     if (site->deopt_dependent_code()) {
     900             :       site->dependent_code()->MarkCodeForDeoptimization(
     901          67 :           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
     902             :       site->set_deopt_dependent_code(false);
     903             :     }
     904         223 :   });
     905             : 
     906          35 :   Deoptimizer::DeoptimizeMarkedCode(isolate_);
     907          35 : }
     908             : 
     909             : 
     910     2855006 : void Heap::GarbageCollectionEpilogue() {
     911      392000 :   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
     912       98000 :   if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
     913           0 :     ZapFromSpace();
     914             :   }
     915             : 
     916             : #ifdef VERIFY_HEAP
     917             :   if (FLAG_verify_heap) {
     918             :     Verify();
     919             :   }
     920             : #endif
     921             : 
     922             :   AllowHeapAllocation for_the_rest_of_the_epilogue;
     923             : 
     924             : #ifdef DEBUG
     925             :   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
     926             :   if (FLAG_print_handles) PrintHandles();
     927             :   if (FLAG_gc_verbose) Print();
     928             :   if (FLAG_code_stats) ReportCodeStatistics("After GC");
     929             :   if (FLAG_check_handle_count) CheckHandleCount();
     930             : #endif
     931             : 
     932       98000 :   UpdateMaximumCommitted();
     933             : 
     934             :   isolate_->counters()->alive_after_last_gc()->Set(
     935      196000 :       static_cast<int>(SizeOfObjects()));
     936             : 
     937             :   isolate_->counters()->string_table_capacity()->Set(
     938      196000 :       string_table()->Capacity());
     939             :   isolate_->counters()->number_of_symbols()->Set(
     940      196000 :       string_table()->NumberOfElements());
     941             : 
     942       98000 :   if (CommittedMemory() > 0) {
     943             :     isolate_->counters()->external_fragmentation_total()->AddSample(
     944      196000 :         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
     945             : 
     946             :     isolate_->counters()->heap_sample_total_committed()->AddSample(
     947      196000 :         static_cast<int>(CommittedMemory() / KB));
     948             :     isolate_->counters()->heap_sample_total_used()->AddSample(
     949      196000 :         static_cast<int>(SizeOfObjects() / KB));
     950             :     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
     951      196000 :         static_cast<int>(map_space()->CommittedMemory() / KB));
     952             :     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
     953      196000 :         static_cast<int>(code_space()->CommittedMemory() / KB));
     954             : 
     955             :     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
     956      196000 :         static_cast<int>(MaximumCommittedMemory() / KB));
     957             :   }
     958             : 
     959             : #define UPDATE_COUNTERS_FOR_SPACE(space)                \
     960             :   isolate_->counters()->space##_bytes_available()->Set( \
     961             :       static_cast<int>(space()->Available()));          \
     962             :   isolate_->counters()->space##_bytes_committed()->Set( \
     963             :       static_cast<int>(space()->CommittedMemory()));    \
     964             :   isolate_->counters()->space##_bytes_used()->Set(      \
     965             :       static_cast<int>(space()->SizeOfObjects()));
     966             : #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
     967             :   if (space()->CommittedMemory() > 0) {                                \
     968             :     isolate_->counters()->external_fragmentation_##space()->AddSample( \
     969             :         static_cast<int>(100 -                                         \
     970             :                          (space()->SizeOfObjects() * 100.0) /          \
     971             :                              space()->CommittedMemory()));             \
     972             :   }
     973             : #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
     974             :   UPDATE_COUNTERS_FOR_SPACE(space)                         \
     975             :   UPDATE_FRAGMENTATION_FOR_SPACE(space)
     976             : 
     977      588000 :   UPDATE_COUNTERS_FOR_SPACE(new_space)
     978      980000 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
     979      980000 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
     980      980000 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
     981      705509 :   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
     982             : #undef UPDATE_COUNTERS_FOR_SPACE
     983             : #undef UPDATE_FRAGMENTATION_FOR_SPACE
     984             : #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
     985             : 
     986             : #ifdef DEBUG
     987             :   ReportStatisticsAfterGC();
     988             : #endif  // DEBUG
     989             : 
     990       98000 :   last_gc_time_ = MonotonicallyIncreasingTimeInMs();
     991             : 
     992             :   {
     993      392000 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
     994      196000 :     ReduceNewSpaceSize();
     995             :   }
     996             : 
     997       98000 :   if (FLAG_harmony_weak_refs) {
     998             :     // TODO(marja): (spec): The exact condition on when to schedule the cleanup
     999             :     // task is unclear. This version schedules the cleanup task for a
    1000             :     // JSFinalizationGroup whenever the GC has discovered new dirty WeakCells
    1001             :     // for it (at that point it might have leftover dirty WeakCells since an
    1002             :     // earlier invocation of the cleanup function didn't iterate through
    1003             :     // them). See https://github.com/tc39/proposal-weakrefs/issues/34
    1004             :     HandleScope handle_scope(isolate());
    1005        1206 :     while (!isolate()->heap()->dirty_js_finalization_groups()->IsUndefined(
    1006        1206 :         isolate())) {
    1007             :       // Enqueue one microtask per JSFinalizationGroup.
    1008             :       Handle<JSFinalizationGroup> finalization_group(
    1009             :           JSFinalizationGroup::cast(
    1010             :               isolate()->heap()->dirty_js_finalization_groups()),
    1011         207 :           isolate());
    1012             :       isolate()->heap()->set_dirty_js_finalization_groups(
    1013         207 :           finalization_group->next());
    1014         414 :       finalization_group->set_next(ReadOnlyRoots(isolate()).undefined_value());
    1015             :       Handle<NativeContext> context(finalization_group->native_context(),
    1016         414 :                                     isolate());
    1017             :       // GC has no native context, but we use the creation context of the
    1018             :       // JSFinalizationGroup for the EnqueueTask operation. This is consistent
    1019             :       // with the Promise implementation, assuming the JSFinalizationGroup's
    1020             :       // creation context is the "caller's context" in promise functions. An
    1021             :       // alternative would be to use the native context of the cleanup
    1022             :       // function. This difference shouldn't be observable from JavaScript,
    1023             :       // since we enter the native context of the cleanup function before
    1024             :       // calling it. TODO(marja): Revisit when the spec clarifies this. See also
    1025             :       // https://github.com/tc39/proposal-weakrefs/issues/38 .
    1026             :       Handle<FinalizationGroupCleanupJobTask> task =
    1027             :           isolate()->factory()->NewFinalizationGroupCleanupJobTask(
    1028         207 :               finalization_group);
    1029         414 :       context->microtask_queue()->EnqueueMicrotask(*task);
    1030             :     }
    1031       98000 :   }
    1032       98000 : }
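
The external fragmentation samples recorded in the epilogue above all reduce to the same formula: 100 minus the percentage of committed memory occupied by live objects. A minimal standalone sketch of that computation (the helper name and the example sizes are illustrative, not part of heap.cc):

#include <cstdio>

// Mirrors the sample computation: 100 - (live bytes * 100.0) / committed bytes.
static int ExternalFragmentationPercent(double live_bytes,
                                        double committed_bytes) {
  if (committed_bytes <= 0) return 0;  // Guard, as the code above checks CommittedMemory() > 0.
  return static_cast<int>(100 - (live_bytes * 100.0) / committed_bytes);
}

int main() {
  // 60 MB live in 80 MB committed -> 25% external fragmentation.
  std::printf("%d\n", ExternalFragmentationPercent(60.0 * 1024 * 1024,
                                                   80.0 * 1024 * 1024));
  return 0;
}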
    1033             : 
    1034             : class GCCallbacksScope {
    1035             :  public:
    1036             :   explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    1037      232992 :     heap_->gc_callbacks_depth_++;
    1038             :   }
    1039      232992 :   ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
    1040             : 
    1041       98000 :   bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
    1042             : 
    1043             :  private:
    1044             :   Heap* heap_;
    1045             : };
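
GCCallbacksScope is a small RAII guard: the constructor bumps a depth counter, the destructor restores it, and CheckReenter() reports whether the current scope is the outermost one, so prologue/epilogue callbacks run only once even if a callback re-enters GC. A generic sketch of the same pattern under invented names:

// Depth-counting scope guard; callbacks fire only when IsOutermost() is true.
class DepthGuard {
 public:
  explicit DepthGuard(int* depth) : depth_(depth) { ++*depth_; }
  ~DepthGuard() { --*depth_; }
  bool IsOutermost() const { return *depth_ == 1; }

 private:
  int* depth_;
};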
    1046             : 
    1047             : 
    1048       17111 : void Heap::HandleGCRequest() {
    1049       17111 :   if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
    1050             :     CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
    1051           0 :     stress_scavenge_observer_->RequestedGCDone();
    1052       17111 :   } else if (HighMemoryPressure()) {
    1053             :     incremental_marking()->reset_request_type();
    1054           5 :     CheckMemoryPressure();
    1055       17106 :   } else if (incremental_marking()->request_type() ==
    1056             :              IncrementalMarking::COMPLETE_MARKING) {
    1057             :     incremental_marking()->reset_request_type();
    1058             :     CollectAllGarbage(current_gc_flags_,
    1059             :                       GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
    1060        6230 :                       current_gc_callback_flags_);
    1061       10876 :   } else if (incremental_marking()->request_type() ==
    1062       10848 :                  IncrementalMarking::FINALIZATION &&
    1063       21724 :              incremental_marking()->IsMarking() &&
    1064       10848 :              !incremental_marking()->finalize_marking_completed()) {
    1065             :     incremental_marking()->reset_request_type();
    1066             :     FinalizeIncrementalMarkingIncrementally(
    1067       10848 :         GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
    1068             :   }
    1069       17111 : }
    1070             : 
    1071             : 
    1072           0 : void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
    1073             :   DCHECK(FLAG_idle_time_scavenge);
    1074             :   DCHECK_NOT_NULL(scavenge_job_);
    1075       32825 :   scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
    1076           0 : }
    1077             : 
    1078      122864 : TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
    1079       98000 :   if (IsYoungGenerationCollector(collector)) {
    1080       98000 :     if (isolate_->IsIsolateInBackground()) {
    1081           0 :       return isolate_->counters()->gc_scavenger_background();
    1082             :     }
    1083       23490 :     return isolate_->counters()->gc_scavenger_foreground();
    1084             :   } else {
    1085       74510 :     if (!incremental_marking()->IsStopped()) {
    1086       24864 :       if (ShouldReduceMemory()) {
    1087        5120 :         if (isolate_->IsIsolateInBackground()) {
    1088           0 :           return isolate_->counters()->gc_finalize_reduce_memory_background();
    1089             :         }
    1090        2560 :         return isolate_->counters()->gc_finalize_reduce_memory_foreground();
    1091             :       } else {
    1092       44608 :         if (isolate_->IsIsolateInBackground()) {
    1093           0 :           return isolate_->counters()->gc_finalize_background();
    1094             :         }
    1095       22304 :         return isolate_->counters()->gc_finalize_foreground();
    1096             :       }
    1097             :     } else {
    1098       99292 :       if (isolate_->IsIsolateInBackground()) {
    1099           0 :         return isolate_->counters()->gc_compactor_background();
    1100             :       }
    1101       49646 :       return isolate_->counters()->gc_compactor_foreground();
    1102             :     }
    1103             :   }
    1104             : }
    1105             : 
    1106      122864 : TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
    1107       98000 :   if (IsYoungGenerationCollector(collector)) {
    1108       46980 :     return isolate_->counters()->gc_scavenger();
    1109             :   } else {
    1110       74510 :     if (!incremental_marking()->IsStopped()) {
    1111       24864 :       if (ShouldReduceMemory()) {
    1112        5120 :         return isolate_->counters()->gc_finalize_reduce_memory();
    1113             :       } else {
    1114       44608 :         return isolate_->counters()->gc_finalize();
    1115             :       }
    1116             :     } else {
    1117       99292 :       return isolate_->counters()->gc_compactor();
    1118             :     }
    1119             :   }
    1120             : }
    1121             : 
    1122        4117 : void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
    1123             :                              const v8::GCCallbackFlags gc_callback_flags) {
    1124             :   // Since we are ignoring the return value, the exact choice of space does
    1125             :   // not matter, so long as we do not specify NEW_SPACE, which would not
    1126             :   // cause a full GC.
    1127             :   set_current_gc_flags(flags);
    1128       70570 :   CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
    1129             :   set_current_gc_flags(kNoGCFlags);
    1130        4117 : }
    1131             : 
    1132             : namespace {
    1133             : 
    1134             : intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
    1135           0 :   int slots = size / kTaggedSize;
    1136             :   DCHECK_EQ(a->Size(), size);
    1137             :   DCHECK_EQ(b->Size(), size);
    1138           0 :   Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a->address());
    1139           0 :   Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b->address());
    1140           0 :   for (int i = 0; i < slots; i++) {
    1141           0 :     if (*slot_a != *slot_b) {
    1142           0 :       return *slot_a - *slot_b;
    1143             :     }
    1144           0 :     slot_a++;
    1145           0 :     slot_b++;
    1146             :   }
    1147             :   return 0;
    1148             : }
    1149             : 
    1150           0 : void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
    1151           0 :   if (objects.size() == 0) return;
    1152             : 
    1153           0 :   sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
    1154           0 :     intptr_t c = CompareWords(size, a, b);
    1155           0 :     if (c != 0) return c < 0;
    1156           0 :     return a < b;
    1157           0 :   });
    1158             : 
    1159             :   std::vector<std::pair<int, HeapObject>> duplicates;
    1160           0 :   HeapObject current = objects[0];
    1161             :   int count = 1;
    1162           0 :   for (size_t i = 1; i < objects.size(); i++) {
    1163           0 :     if (CompareWords(size, current, objects[i]) == 0) {
    1164           0 :       count++;
    1165             :     } else {
    1166           0 :       if (count > 1) {
    1167           0 :         duplicates.push_back(std::make_pair(count - 1, current));
    1168             :       }
    1169             :       count = 1;
    1170           0 :       current = objects[i];
    1171             :     }
    1172             :   }
    1173           0 :   if (count > 1) {
    1174           0 :     duplicates.push_back(std::make_pair(count - 1, current));
    1175             :   }
    1176             : 
    1177           0 :   int threshold = FLAG_trace_duplicate_threshold_kb * KB;
    1178             : 
    1179           0 :   sort(duplicates.begin(), duplicates.end());
    1180           0 :   for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
    1181           0 :     int duplicate_bytes = it->first * size;
    1182           0 :     if (duplicate_bytes < threshold) break;
    1183             :     PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
    1184           0 :            duplicate_bytes / KB);
    1185           0 :     PrintF("Sample object: ");
    1186           0 :     it->second->Print();
    1187           0 :     PrintF("============================\n");
    1188             :   }
    1189             : }
    1190             : }  // anonymous namespace
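
ReportDuplicates sorts byte-identical objects next to each other and then counts run lengths, reporting only runs whose wasted bytes exceed the tracing threshold. The same sort-then-count-runs idea as a self-contained sketch, with std::string standing in for raw object payloads:

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Sorts items so equal payloads are adjacent, then reports runs longer than 1.
static void ReportRuns(std::vector<std::string> items) {
  if (items.empty()) return;
  std::sort(items.begin(), items.end());
  size_t run = 1;
  for (size_t i = 1; i <= items.size(); i++) {
    if (i < items.size() && items[i] == items[i - 1]) {
      run++;
    } else {
      if (run > 1) {
        // Like ReportDuplicates, count the copies beyond the first one.
        std::printf("%zu duplicates of \"%s\"\n", run - 1,
                    items[i - 1].c_str());
      }
      run = 1;
    }
  }
}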
    1191             : 
    1192        1258 : void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
    1193             :   // Since we are ignoring the return value, the exact choice of space does
    1194             :   // not matter, so long as we do not specify NEW_SPACE, which would not
    1195             :   // cause a full GC.
    1196             :   // A major GC invokes weak handle callbacks on weakly reachable
    1197             :   // handles, but does not collect weakly reachable objects until the next
    1198             :   // major GC.  Therefore, if we collect aggressively and a weak handle
    1199             :   // callback has been invoked, we rerun the major GC to release objects
    1200             :   // which have become garbage.
    1201             :   // Note: as weak callbacks can execute arbitrary code, we cannot
    1202             :   // hope that eventually there will be no weak callback invocations.
    1203             :   // Therefore, stop recollecting after several attempts.
    1204        1258 :   if (gc_reason == GarbageCollectionReason::kLastResort) {
    1205          22 :     InvokeNearHeapLimitCallback();
    1206             :   }
    1207             :   RuntimeCallTimerScope runtime_timer(
    1208        1258 :       isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
    1209             : 
    1210             :   // The optimizing compiler may be unnecessarily holding on to memory.
    1211        1258 :   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    1212        1258 :   isolate()->ClearSerializerData();
    1213             :   set_current_gc_flags(kReduceMemoryFootprintMask);
    1214        1258 :   isolate_->compilation_cache()->Clear();
    1215             :   const int kMaxNumberOfAttempts = 7;
    1216             :   const int kMinNumberOfAttempts = 2;
    1217             :   const v8::GCCallbackFlags callback_flags =
    1218             :       gc_reason == GarbageCollectionReason::kLowMemoryNotification
    1219             :           ? v8::kGCCallbackFlagForced
    1220        1258 :           : v8::kGCCallbackFlagCollectAllAvailableGarbage;
    1221        2559 :   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    1222        2559 :     if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
    1223             :         attempt + 1 >= kMinNumberOfAttempts) {
    1224             :       break;
    1225             :     }
    1226             :   }
    1227             : 
    1228             :   set_current_gc_flags(kNoGCFlags);
    1229        1258 :   new_space_->Shrink();
    1230        2516 :   new_lo_space_->SetCapacity(new_space_->Capacity());
    1231             :   UncommitFromSpace();
    1232        1258 :   EagerlyFreeExternalMemory();
    1233             : 
    1234        1258 :   if (FLAG_trace_duplicate_threshold_kb) {
    1235             :     std::map<int, std::vector<HeapObject>> objects_by_size;
    1236             :     PagedSpaces spaces(this);
    1237           0 :     for (PagedSpace* space = spaces.next(); space != nullptr;
    1238             :          space = spaces.next()) {
    1239           0 :       HeapObjectIterator it(space);
    1240           0 :       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    1241           0 :         objects_by_size[obj->Size()].push_back(obj);
    1242             :       }
    1243             :     }
    1244             :     {
    1245           0 :       LargeObjectIterator it(lo_space());
    1246           0 :       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    1247           0 :         objects_by_size[obj->Size()].push_back(obj);
    1248             :       }
    1249             :     }
    1250           0 :     for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
    1251             :          ++it) {
    1252           0 :       ReportDuplicates(it->first, it->second);
    1253             :     }
    1254             :   }
    1255        1258 : }
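
The attempt loop above keeps collecting while a pass reports that another pass is likely to free more memory, but it always makes at least kMinNumberOfAttempts passes and never more than kMaxNumberOfAttempts. The policy in isolation, with CollectOnce as a hypothetical stand-in for CollectGarbage:

static void CollectAllAvailableSketch(bool (*CollectOnce)()) {
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    // Stop once a pass reports nothing more is likely to be freed, but only
    // after the minimum number of passes has been made.
    if (!CollectOnce() && attempt + 1 >= kMinNumberOfAttempts) break;
  }
}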
    1256             : 
    1257       33525 : void Heap::PreciseCollectAllGarbage(int flags,
    1258             :                                     GarbageCollectionReason gc_reason,
    1259             :                                     const GCCallbackFlags gc_callback_flags) {
    1260       33525 :   if (!incremental_marking()->IsStopped()) {
    1261             :     FinalizeIncrementalMarkingAtomically(gc_reason);
    1262             :   }
    1263             :   CollectAllGarbage(flags, gc_reason, gc_callback_flags);
    1264       33525 : }
    1265             : 
    1266      970594 : void Heap::ReportExternalMemoryPressure() {
    1267             :   const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
    1268             :       static_cast<GCCallbackFlags>(
    1269             :           kGCCallbackFlagSynchronousPhantomCallbackProcessing |
    1270             :           kGCCallbackFlagCollectAllExternalMemory);
    1271     1941188 :   if (isolate()->isolate_data()->external_memory_ >
    1272     1941188 :       (isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
    1273             :        external_memory_hard_limit())) {
    1274             :     CollectAllGarbage(
    1275             :         kReduceMemoryFootprintMask,
    1276             :         GarbageCollectionReason::kExternalMemoryPressure,
    1277             :         static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
    1278             :                                      kGCCallbackFlagsForExternalMemory));
    1279      970594 :     return;
    1280             :   }
    1281      969678 :   if (incremental_marking()->IsStopped()) {
    1282        1355 :     if (incremental_marking()->CanBeActivated()) {
    1283             :       StartIncrementalMarking(GCFlagsForIncrementalMarking(),
    1284             :                               GarbageCollectionReason::kExternalMemoryPressure,
    1285             :                               kGCCallbackFlagsForExternalMemory);
    1286             :     } else {
    1287             :       CollectAllGarbage(i::Heap::kNoGCFlags,
    1288             :                         GarbageCollectionReason::kExternalMemoryPressure,
    1289             :                         kGCCallbackFlagsForExternalMemory);
    1290             :     }
    1291             :   } else {
    1292             :     // Incremental marking is turned on and has already been started.
    1293             :     const double kMinStepSize = 5;
    1294             :     const double kMaxStepSize = 10;
    1295             :     const double ms_step = Min(
    1296             :         kMaxStepSize,
    1297             :         Max(kMinStepSize,
    1298      968323 :             static_cast<double>(isolate()->isolate_data()->external_memory_) /
    1299             :                 isolate()->isolate_data()->external_memory_limit_ *
    1300      968323 :                 kMinStepSize));
    1301      968323 :     const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
    1302             :     // Extend the gc callback flags with external memory flags.
    1303             :     current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
    1304      968323 :         current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
    1305             :     incremental_marking()->AdvanceWithDeadline(
    1306      968323 :         deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
    1307             :   }
    1308             : }
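
In the incremental-marking branch above, the step deadline grows with the ratio of external memory to its limit and is clamped to the [kMinStepSize, kMaxStepSize] millisecond range. The same clamp expressed with the standard library (a sketch, not V8's Min/Max helpers):

#include <algorithm>

// deadline = now + clamp(external / limit * kMin, kMin, kMax) milliseconds.
static double ExternalPressureStepMs(double external_memory,
                                     double external_memory_limit) {
  const double kMinStepSize = 5;
  const double kMaxStepSize = 10;
  return std::min(kMaxStepSize,
                  std::max(kMinStepSize, external_memory /
                                             external_memory_limit *
                                             kMinStepSize));
}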
    1309             : 
    1310       98000 : void Heap::EnsureFillerObjectAtTop() {
    1311             :   // There may be an allocation memento behind objects in new space. Upon
    1312             :   // evacuation of a non-full new space (or if we are on the last page) there
    1313             :   // may be uninitialized memory behind top. We fill the remainder of the page
    1314             :   // with a filler.
    1315       98000 :   Address to_top = new_space_->top();
    1316       98000 :   Page* page = Page::FromAddress(to_top - kTaggedSize);
    1317       98000 :   if (page->Contains(to_top)) {
    1318       96219 :     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
    1319       96219 :     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
    1320             :   }
    1321       98000 : }
    1322             : 
    1323       98000 : bool Heap::CollectGarbage(AllocationSpace space,
    1324             :                           GarbageCollectionReason gc_reason,
    1325      196000 :                           const v8::GCCallbackFlags gc_callback_flags) {
    1326       98000 :   const char* collector_reason = nullptr;
    1327       98000 :   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
    1328       98000 :   is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
    1329             : 
    1330       98000 :   if (!CanExpandOldGeneration(new_space()->Capacity() +
    1331       98000 :                               new_lo_space()->Size())) {
    1332          61 :     InvokeNearHeapLimitCallback();
    1333             :   }
    1334             : 
    1335             :   // Ensure that all pending phantom callbacks are invoked.
    1336       98000 :   isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
    1337             : 
    1338             :   // The VM is in the GC state until exiting this function.
    1339             :   VMState<GC> state(isolate());
    1340             : 
    1341             : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
    1342             :   // Reset the allocation timeout, but make sure to allow at least a few
    1343             :   // allocations after a collection. The reason for this is that we have a lot
    1344             :   // of allocation sequences and we assume that a garbage collection will allow
    1345             :   // the subsequent allocation attempts to go through.
    1346             :   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    1347             :     allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
    1348             :   }
    1349             : #endif
    1350             : 
    1351       98000 :   EnsureFillerObjectAtTop();
    1352             : 
    1353      121490 :   if (IsYoungGenerationCollector(collector) &&
    1354             :       !incremental_marking()->IsStopped()) {
    1355         852 :     if (FLAG_trace_incremental_marking) {
    1356             :       isolate()->PrintWithTimestamp(
    1357           0 :           "[IncrementalMarking] Scavenge during marking.\n");
    1358             :     }
    1359             :   }
    1360             : 
    1361             :   bool next_gc_likely_to_collect_more = false;
    1362             :   size_t committed_memory_before = 0;
    1363             : 
    1364       98000 :   if (collector == MARK_COMPACTOR) {
    1365       74510 :     committed_memory_before = CommittedOldGenerationMemory();
    1366             :   }
    1367             : 
    1368             :   {
    1369      196000 :     tracer()->Start(collector, gc_reason, collector_reason);
    1370             :     DCHECK(AllowHeapAllocation::IsAllowed());
    1371             :     DisallowHeapAllocation no_allocation_during_gc;
    1372       98000 :     GarbageCollectionPrologue();
    1373             : 
    1374             :     {
    1375       98000 :       TimedHistogram* gc_type_timer = GCTypeTimer(collector);
    1376      196000 :       TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
    1377      294000 :       TRACE_EVENT0("v8", gc_type_timer->name());
    1378             : 
    1379       98000 :       TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
    1380             :       OptionalTimedHistogramScopeMode mode =
    1381       98000 :           isolate_->IsMemorySavingsModeActive()
    1382             :               ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
    1383       98000 :               : OptionalTimedHistogramScopeMode::TAKE_TIME;
    1384             :       OptionalTimedHistogramScope histogram_timer_priority_scope(
    1385       98000 :           gc_type_priority_timer, isolate_, mode);
    1386             : 
    1387             :       next_gc_likely_to_collect_more =
    1388       98000 :           PerformGarbageCollection(collector, gc_callback_flags);
    1389       98000 :       if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
    1390       98000 :         tracer()->RecordGCPhasesHistograms(gc_type_timer);
    1391             :       }
    1392             :     }
    1393             : 
    1394             :     // Clear is_current_gc_forced now that the current GC is complete. Do this
    1395             :     // before GarbageCollectionEpilogue() since that could trigger another
    1396             :     // unforced GC.
    1397       98000 :     is_current_gc_forced_ = false;
    1398             : 
    1399       98000 :     GarbageCollectionEpilogue();
    1400       98000 :     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
    1401       74510 :       isolate()->CheckDetachedContextsAfterGC();
    1402             :     }
    1403             : 
    1404       98000 :     if (collector == MARK_COMPACTOR) {
    1405       74510 :       size_t committed_memory_after = CommittedOldGenerationMemory();
    1406       74510 :       size_t used_memory_after = OldGenerationSizeOfObjects();
    1407             :       MemoryReducer::Event event;
    1408       74510 :       event.type = MemoryReducer::kMarkCompact;
    1409       74510 :       event.time_ms = MonotonicallyIncreasingTimeInMs();
    1410             :       // Trigger one more GC if
    1411             :       // - this GC decreased committed memory,
    1412             :       // - there is high fragmentation,
    1413             :       // - there are live detached contexts.
    1414             :       event.next_gc_likely_to_collect_more =
    1415      148295 :           (committed_memory_before > committed_memory_after + MB) ||
    1416      148295 :           HasHighFragmentation(used_memory_after, committed_memory_after) ||
    1417      148295 :           (detached_contexts()->length() > 0);
    1418       74510 :       event.committed_memory = committed_memory_after;
    1419       74510 :       if (deserialization_complete_) {
    1420       74510 :         memory_reducer_->NotifyMarkCompact(event);
    1421             :       }
    1422       74558 :       if (initial_max_old_generation_size_ < max_old_generation_size_ &&
    1423          48 :           used_memory_after < initial_max_old_generation_size_threshold_) {
    1424           4 :         max_old_generation_size_ = initial_max_old_generation_size_;
    1425             :       }
    1426             :     }
    1427             : 
    1428       98000 :     tracer()->Stop(collector);
    1429             :   }
    1430             : 
    1431      172510 :   if (collector == MARK_COMPACTOR &&
    1432       74510 :       (gc_callback_flags & (kGCCallbackFlagForced |
    1433             :                             kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
    1434       33684 :     isolate()->CountUsage(v8::Isolate::kForcedGC);
    1435             :   }
    1436             : 
    1437             :   // Start incremental marking for the next cycle. We do this only for the
    1438             :   // scavenger to avoid a loop where mark-compact causes another mark-compact.
    1439       98000 :   if (IsYoungGenerationCollector(collector)) {
    1440             :     StartIncrementalMarkingIfAllocationLimitIsReached(
    1441             :         GCFlagsForIncrementalMarking(),
    1442       23490 :         kGCCallbackScheduleIdleGarbageCollection);
    1443             :   }
    1444             : 
    1445       98000 :   return next_gc_likely_to_collect_more;
    1446             : }
    1447             : 
    1448             : 
    1449         650 : int Heap::NotifyContextDisposed(bool dependant_context) {
    1450         650 :   if (!dependant_context) {
    1451          10 :     tracer()->ResetSurvivalEvents();
    1452          10 :     old_generation_size_configured_ = false;
    1453          10 :     old_generation_allocation_limit_ = initial_old_generation_size_;
    1454             :     MemoryReducer::Event event;
    1455          10 :     event.type = MemoryReducer::kPossibleGarbage;
    1456          10 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    1457          10 :     memory_reducer_->NotifyPossibleGarbage(event);
    1458             :   }
    1459         650 :   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    1460             : 
    1461        1300 :   number_of_disposed_maps_ = retained_maps()->length();
    1462        1300 :   tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
    1463         650 :   return ++contexts_disposed_;
    1464             : }
    1465             : 
    1466        1372 : void Heap::StartIncrementalMarking(int gc_flags,
    1467             :                                    GarbageCollectionReason gc_reason,
    1468             :                                    GCCallbackFlags gc_callback_flags) {
    1469             :   DCHECK(incremental_marking()->IsStopped());
    1470             :   set_current_gc_flags(gc_flags);
    1471       30395 :   current_gc_callback_flags_ = gc_callback_flags;
    1472       30395 :   incremental_marking()->Start(gc_reason);
    1473        1372 : }
    1474             : 
    1475     1592770 : void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
    1476             :     int gc_flags, const GCCallbackFlags gc_callback_flags) {
    1477     1592770 :   if (incremental_marking()->IsStopped()) {
    1478     1321451 :     IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
    1479     1321451 :     if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
    1480        2061 :       incremental_marking()->incremental_marking_job()->ScheduleTask(this);
    1481     1319390 :     } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
    1482             :       StartIncrementalMarking(gc_flags,
    1483             :                               GarbageCollectionReason::kAllocationLimit,
    1484             :                               gc_callback_flags);
    1485             :     }
    1486             :   }
    1487     1592770 : }
    1488             : 
    1489          18 : void Heap::StartIdleIncrementalMarking(
    1490             :     GarbageCollectionReason gc_reason,
    1491             :     const GCCallbackFlags gc_callback_flags) {
    1492             :   gc_idle_time_handler_->ResetNoProgressCounter();
    1493             :   StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
    1494             :                           gc_callback_flags);
    1495          18 : }
    1496             : 
    1497        1426 : void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
    1498             :                         WriteBarrierMode mode) {
    1499        1426 :   if (len == 0) return;
    1500             : 
    1501             :   DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    1502             :   ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
    1503             :   ObjectSlot src = array->RawFieldOfElementAt(src_index);
    1504        2846 :   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
    1505         150 :     if (dst < src) {
    1506         305 :       for (int i = 0; i < len; i++) {
    1507             :         dst.Relaxed_Store(src.Relaxed_Load());
    1508             :         ++dst;
    1509             :         ++src;
    1510             :       }
    1511             :     } else {
    1512             :       // Copy backwards.
    1513         118 :       dst += len - 1;
    1514             :       src += len - 1;
    1515         332 :       for (int i = 0; i < len; i++) {
    1516             :         dst.Relaxed_Store(src.Relaxed_Load());
    1517             :         --dst;
    1518             :         --src;
    1519             :       }
    1520             :     }
    1521             :   } else {
    1522        1276 :     MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
    1523             :   }
    1524        1426 :   if (mode == SKIP_WRITE_BARRIER) return;
    1525         602 :   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
    1526             : }
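
When concurrent marking is active, MoveElements copies slot by slot and picks the direction the same way memmove does: forwards when the destination starts before the source, backwards otherwise, so an overlapping source range is never overwritten before it is read. The rule on plain integers:

// Overlap-safe element move; same direction rule as the slot loop above.
static void MoveOverlapping(int* dst, const int* src, int len) {
  if (dst < src) {
    for (int i = 0; i < len; i++) dst[i] = src[i];      // Copy forwards.
  } else {
    for (int i = len - 1; i >= 0; i--) dst[i] = src[i];  // Copy backwards.
  }
}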
    1527             : 
    1528             : #ifdef VERIFY_HEAP
    1529             : // Helper class for verifying the string table.
    1530             : class StringTableVerifier : public ObjectVisitor {
    1531             :  public:
    1532             :   explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
    1533             : 
    1534             :   void VisitPointers(HeapObject host, ObjectSlot start,
    1535             :                      ObjectSlot end) override {
    1536             :     // Visit all HeapObject pointers in [start, end).
    1537             :     for (ObjectSlot p = start; p < end; ++p) {
    1538             :       DCHECK(!HasWeakHeapObjectTag(*p));
    1539             :       if ((*p)->IsHeapObject()) {
    1540             :         HeapObject object = HeapObject::cast(*p);
    1541             :         // Check that the string is actually internalized.
    1542             :         CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
    1543             :               object->IsInternalizedString());
    1544             :       }
    1545             :     }
    1546             :   }
    1547             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    1548             :                      MaybeObjectSlot end) override {
    1549             :     UNREACHABLE();
    1550             :   }
    1551             : 
    1552             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
    1553             : 
    1554             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    1555             :     UNREACHABLE();
    1556             :   }
    1557             : 
    1558             :  private:
    1559             :   Isolate* isolate_;
    1560             : };
    1561             : 
    1562             : static void VerifyStringTable(Isolate* isolate) {
    1563             :   StringTableVerifier verifier(isolate);
    1564             :   isolate->heap()->string_table()->IterateElements(&verifier);
    1565             : }
    1566             : #endif  // VERIFY_HEAP
    1567             : 
    1568    22669769 : bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
    1569             :   bool gc_performed = true;
    1570             :   int counter = 0;
    1571             :   static const int kThreshold = 20;
    1572      635689 :   while (gc_performed && counter++ < kThreshold) {
    1573             :     gc_performed = false;
    1574     1271377 :     for (int space = FIRST_SPACE;
    1575             :          space < SerializerDeserializer::kNumberOfSpaces; space++) {
    1576     1271378 :       Reservation* reservation = &reservations[space];
    1577             :       DCHECK_LE(1, reservation->size());
    1578     1271373 :       if (reservation->at(0).size == 0) {
    1579             :         DCHECK_EQ(1, reservation->size());
    1580             :         continue;
    1581             :       }
    1582             :       bool perform_gc = false;
    1583      423827 :       if (space == MAP_SPACE) {
    1584             :         // We allocate each map individually to avoid fragmentation.
    1585             :         maps->clear();
    1586             :         DCHECK_LE(reservation->size(), 2);
    1587             :         int reserved_size = 0;
    1588      452082 :         for (const Chunk& c : *reservation) reserved_size += c.size;
    1589             :         DCHECK_EQ(0, reserved_size % Map::kSize);
    1590      150694 :         int num_maps = reserved_size / Map::kSize;
    1591    45216728 :         for (int i = 0; i < num_maps; i++) {
    1592             :           // The deserializer will update the skip list.
    1593             :           AllocationResult allocation = map_space()->AllocateRawUnaligned(
    1594    22457666 :               Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
    1595    22457667 :           HeapObject free_space;
    1596    22457667 :           if (allocation.To(&free_space)) {
    1597             :             // Mark with a free list node, in case we have a GC before
    1598             :             // deserializing.
    1599    22457670 :             Address free_space_address = free_space->address();
    1600             :             CreateFillerObjectAt(free_space_address, Map::kSize,
    1601    22457670 :                                  ClearRecordedSlots::kNo);
    1602    22457674 :             maps->push_back(free_space_address);
    1603             :           } else {
    1604             :             perform_gc = true;
    1605           0 :             break;
    1606             :           }
    1607             :         }
    1608      273133 :       } else if (space == LO_SPACE) {
    1609             :         // Just check that we can allocate during deserialization.
    1610             :         DCHECK_LE(reservation->size(), 2);
    1611             :         int reserved_size = 0;
    1612         105 :         for (const Chunk& c : *reservation) reserved_size += c.size;
    1613          35 :         perform_gc = !CanExpandOldGeneration(reserved_size);
    1614             :       } else {
    1615     2792479 :         for (auto& chunk : *reservation) {
    1616             :           AllocationResult allocation;
    1617     2519381 :           int size = chunk.size;
    1618             :           DCHECK_LE(static_cast<size_t>(size),
    1619             :                     MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    1620             :                         static_cast<AllocationSpace>(space)));
    1621     2519381 :           if (space == NEW_SPACE) {
    1622         209 :             allocation = new_space()->AllocateRawUnaligned(size);
    1623             :           } else {
    1624             :             // The deserializer will update the skip list.
    1625             :             allocation = paged_space(space)->AllocateRawUnaligned(
    1626     2519174 :                 size, PagedSpace::IGNORE_SKIP_LIST);
    1627             :           }
    1628     2519384 :           HeapObject free_space;
    1629     2519384 :           if (allocation.To(&free_space)) {
    1630             :             // Mark with a free list node, in case we have a GC before
    1631             :             // deserializing.
    1632             :             Address free_space_address = free_space->address();
    1633             :             CreateFillerObjectAt(free_space_address, size,
    1634     2519382 :                                  ClearRecordedSlots::kNo);
    1635             :             DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
    1636             :                       space);
    1637     2519381 :             chunk.start = free_space_address;
    1638     2519381 :             chunk.end = free_space_address + size;
    1639             :           } else {
    1640             :             perform_gc = true;
    1641           1 :             break;
    1642             :           }
    1643             :         }
    1644             :       }
    1645      423832 :       if (perform_gc) {
    1646             :         // We cannot perform a GC with an uninitialized isolate. This check
    1647             :         // fails, for example, if the max old space size is chosen unwisely,
    1648             :         // so that we cannot allocate space to deserialize the initial heap.
    1649           1 :         if (!deserialization_complete_) {
    1650             :           V8::FatalProcessOutOfMemory(
    1651           0 :               isolate(), "insufficient memory to create an Isolate");
    1652             :         }
    1653           1 :         if (space == NEW_SPACE) {
    1654           0 :           CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
    1655             :         } else {
    1656           1 :           if (counter > 1) {
    1657             :             CollectAllGarbage(kReduceMemoryFootprintMask,
    1658             :                               GarbageCollectionReason::kDeserializer);
    1659             :           } else {
    1660             :             CollectAllGarbage(kNoGCFlags,
    1661             :                               GarbageCollectionReason::kDeserializer);
    1662             :           }
    1663             :         }
    1664             :         gc_performed = true;
    1665             :         break;  // Abort for-loop over spaces and retry.
    1666             :       }
    1667             :     }
    1668             :   }
    1669             : 
    1670      211896 :   return !gc_performed;
    1671             : }
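
ReserveSpace retries the whole set of reservations: any failed allocation triggers a GC and a fresh pass over all spaces, and the function gives up after kThreshold rounds. The overall shape, with hypothetical TryReserveAll/ForceGC callbacks standing in for the per-space logic:

// Retry-with-GC loop: returns true once every reservation fits.
static bool ReserveWithRetries(bool (*TryReserveAll)(), void (*ForceGC)()) {
  static const int kThreshold = 20;
  for (int round = 0; round < kThreshold; round++) {
    if (TryReserveAll()) return true;  // All spaces satisfied this round.
    ForceGC();                         // Free memory, then start over.
  }
  return false;  // Still failing after kThreshold rounds.
}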
    1672             : 
    1673             : 
    1674       98000 : void Heap::EnsureFromSpaceIsCommitted() {
    1675      294000 :   if (new_space_->CommitFromSpaceIfNeeded()) return;
    1676             : 
    1677             :   // Committing memory to from space failed.
    1678             :   // Memory is exhausted and we will die.
    1679           0 :   FatalProcessOutOfMemory("Committing semi space failed.");
    1680             : }
    1681             : 
    1682             : 
    1683       98000 : void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
    1684      196000 :   if (start_new_space_size == 0) return;
    1685             : 
    1686       84661 :   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
    1687       84661 :                       static_cast<double>(start_new_space_size) * 100);
    1688             : 
    1689       84661 :   if (previous_semi_space_copied_object_size_ > 0) {
    1690             :     promotion_rate_ =
    1691       56779 :         (static_cast<double>(promoted_objects_size_) /
    1692       56779 :          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
    1693             :   } else {
    1694       27882 :     promotion_rate_ = 0;
    1695             :   }
    1696             : 
    1697             :   semi_space_copied_rate_ =
    1698       84661 :       (static_cast<double>(semi_space_copied_object_size_) /
    1699       84661 :        static_cast<double>(start_new_space_size) * 100);
    1700             : 
    1701       84661 :   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
    1702       84661 :   tracer()->AddSurvivalRatio(survival_rate);
    1703             : }
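
Both ratios computed above are percentages of the young-generation size at the start of the GC; their sum is the survival rate fed to the tracer. The arithmetic in isolation (struct, field, and function names are illustrative):

struct SurvivalSample {
  double promotion_ratio;         // promoted bytes as % of start size
  double semi_space_copied_rate;  // copied bytes as % of start size
  double survival_rate;           // sum of the two
};

static SurvivalSample ComputeSurvival(double promoted_bytes,
                                      double copied_bytes,
                                      double start_new_space_size) {
  SurvivalSample s{};
  if (start_new_space_size == 0) return s;  // Mirrors the early return above.
  s.promotion_ratio = promoted_bytes / start_new_space_size * 100;
  s.semi_space_copied_rate = copied_bytes / start_new_space_size * 100;
  s.survival_rate = s.promotion_ratio + s.semi_space_copied_rate;
  return s;
}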
    1704             : 
    1705       98000 : bool Heap::PerformGarbageCollection(
    1706      294147 :     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
    1707             :   size_t freed_global_handles = 0;
    1708             : 
    1709       98000 :   if (!IsYoungGenerationCollector(collector)) {
    1710      443020 :     PROFILE(isolate_, CodeMovingGCEvent());
    1711             :   }
    1712             : 
    1713             : #ifdef VERIFY_HEAP
    1714             :   if (FLAG_verify_heap) {
    1715             :     VerifyStringTable(this->isolate());
    1716             :   }
    1717             : #endif
    1718             : 
    1719             :   GCType gc_type =
    1720       98000 :       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
    1721             : 
    1722             :   {
    1723             :     GCCallbacksScope scope(this);
    1724             :     // Temporarily override any embedder stack state as callbacks may create their
    1725             :     // own state on the stack and recursively trigger GC.
    1726             :     EmbedderStackStateScope embedder_scope(
    1727             :         local_embedder_heap_tracer(),
    1728             :         EmbedderHeapTracer::EmbedderStackState::kUnknown);
    1729       98000 :     if (scope.CheckReenter()) {
    1730             :       AllowHeapAllocation allow_allocation;
    1731      391872 :       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
    1732      195936 :       VMState<EXTERNAL> state(isolate_);
    1733       97968 :       HandleScope handle_scope(isolate_);
    1734      195936 :       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
    1735             :     }
    1736             :   }
    1737             : 
    1738       98000 :   EnsureFromSpaceIsCommitted();
    1739             : 
    1740             :   size_t start_young_generation_size =
    1741      196000 :       Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
    1742             : 
    1743             :   {
    1744             :     Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
    1745             : 
    1746       98000 :     switch (collector) {
    1747             :       case MARK_COMPACTOR:
    1748             :         UpdateOldGenerationAllocationCounter();
    1749             :         // Perform mark-sweep with optional compaction.
    1750       74510 :         MarkCompact();
    1751       74510 :         old_generation_size_configured_ = true;
    1752             :         // This should be updated before PostGarbageCollectionProcessing, which
    1753             :         // can cause another GC. Take into account the objects promoted during
    1754             :         // GC.
    1755             :         old_generation_allocation_counter_at_last_gc_ +=
    1756       74510 :             static_cast<size_t>(promoted_objects_size_);
    1757       74510 :         old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
    1758       74510 :         break;
    1759             :       case MINOR_MARK_COMPACTOR:
    1760           0 :         MinorMarkCompact();
    1761           0 :         break;
    1762             :       case SCAVENGER:
    1763       23490 :         if ((fast_promotion_mode_ &&
    1764           0 :              CanExpandOldGeneration(new_space()->Size() +
    1765           0 :                                     new_lo_space()->Size()))) {
    1766             :           tracer()->NotifyYoungGenerationHandling(
    1767           0 :               YoungGenerationHandling::kFastPromotionDuringScavenge);
    1768           0 :           EvacuateYoungGeneration();
    1769             :         } else {
    1770             :           tracer()->NotifyYoungGenerationHandling(
    1771       23490 :               YoungGenerationHandling::kRegularScavenge);
    1772             : 
    1773       23490 :           Scavenge();
    1774             :         }
    1775             :         break;
    1776             :     }
    1777             : 
    1778       98000 :     ProcessPretenuringFeedback();
    1779             :   }
    1780             : 
    1781       98000 :   UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
    1782       98000 :   ConfigureInitialOldGenerationSize();
    1783             : 
    1784       98000 :   if (collector != MARK_COMPACTOR) {
    1785             :     // Objects that died in the new space might have been accounted
    1786             :     // as bytes marked ahead of schedule by the incremental marker.
    1787             :     incremental_marking()->UpdateMarkedBytesAfterScavenge(
    1788       46980 :         start_young_generation_size - SurvivedYoungObjectSize());
    1789             :   }
    1790             : 
    1791       98000 :   if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
    1792       98000 :     ComputeFastPromotionMode();
    1793             :   }
    1794             : 
    1795      196000 :   isolate_->counters()->objs_since_last_young()->Set(0);
    1796             : 
    1797             :   {
    1798      392000 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
    1799             :     // First round weak callbacks are not supposed to allocate and trigger
    1800             :     // nested GCs.
    1801             :     freed_global_handles =
    1802      294000 :         isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
    1803             :   }
    1804             : 
    1805       98000 :   if (collector == MARK_COMPACTOR) {
    1806      298040 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
    1807             :     // TraceEpilogue may trigger operations that invalidate global handles. It
    1808             :     // has to be called *after* all other operations that potentially touch and
    1809             :     // reset global handles. It is also still part of the main garbage
    1810             :     // collection pause and thus needs to be called *before* any operation that
    1811             :     // can potentially trigger a recursive garbage collection.
    1812      149020 :     local_embedder_heap_tracer()->TraceEpilogue();
    1813             :   }
    1814             : 
    1815             :   {
    1816      392000 :     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
    1817       98000 :     gc_post_processing_depth_++;
    1818             :     {
    1819             :       AllowHeapAllocation allow_allocation;
    1820             :       freed_global_handles +=
    1821             :           isolate_->global_handles()->PostGarbageCollectionProcessing(
    1822      196000 :               collector, gc_callback_flags);
    1823             :     }
    1824      196000 :     gc_post_processing_depth_--;
    1825             :   }
    1826             : 
    1827      196000 :   isolate_->eternal_handles()->PostGarbageCollectionProcessing();
    1828             : 
    1829             :   // Update relocatables.
    1830       98000 :   Relocatable::PostGarbageCollectionProcessing(isolate_);
    1831             : 
    1832       98000 :   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
    1833             :   double mutator_speed =
    1834       98000 :       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
    1835       98000 :   size_t old_gen_size = OldGenerationSizeOfObjects();
    1836       98000 :   if (collector == MARK_COMPACTOR) {
    1837             :     // Register the amount of external allocated memory.
    1838             :     isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
    1839       74510 :         isolate()->isolate_data()->external_memory_;
    1840             :     isolate()->isolate_data()->external_memory_limit_ =
    1841       74510 :         isolate()->isolate_data()->external_memory_ +
    1842       74510 :         kExternalAllocationSoftLimit;
    1843             : 
    1844             :     double max_factor =
    1845      149020 :         heap_controller()->MaxGrowingFactor(max_old_generation_size_);
    1846             :     size_t new_limit = heap_controller()->CalculateAllocationLimit(
    1847             :         old_gen_size, max_old_generation_size_, max_factor, gc_speed,
    1848      223530 :         mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
    1849       74510 :     old_generation_allocation_limit_ = new_limit;
    1850             : 
    1851             :     CheckIneffectiveMarkCompact(
    1852       74510 :         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
    1853       23490 :   } else if (HasLowYoungGenerationAllocationRate() &&
    1854             :              old_generation_size_configured_) {
    1855             :     double max_factor =
    1856         294 :         heap_controller()->MaxGrowingFactor(max_old_generation_size_);
    1857             :     size_t new_limit = heap_controller()->CalculateAllocationLimit(
    1858             :         old_gen_size, max_old_generation_size_, max_factor, gc_speed,
    1859         441 :         mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
    1860         147 :     if (new_limit < old_generation_allocation_limit_) {
    1861           2 :       old_generation_allocation_limit_ = new_limit;
    1862             :     }
    1863             :   }
    1864             : 
    1865             :   {
    1866             :     GCCallbacksScope scope(this);
    1867       98000 :     if (scope.CheckReenter()) {
    1868             :       AllowHeapAllocation allow_allocation;
    1869      391872 :       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
    1870      195936 :       VMState<EXTERNAL> state(isolate_);
    1871       97968 :       HandleScope handle_scope(isolate_);
    1872      195936 :       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
    1873             :     }
    1874             :   }
    1875             : 
    1876             : #ifdef VERIFY_HEAP
    1877             :   if (FLAG_verify_heap) {
    1878             :     VerifyStringTable(this->isolate());
    1879             :   }
    1880             : #endif
    1881             : 
    1882       98000 :   return freed_global_handles > 0;
    1883             : }
    1884             : 
    1885             : 
    1886      125265 : void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
    1887             :   RuntimeCallTimerScope runtime_timer(
    1888      125265 :       isolate(), RuntimeCallCounterId::kGCPrologueCallback);
    1889      250597 :   for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
    1890          67 :     if (gc_type & info.gc_type) {
    1891             :       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
    1892          67 :       info.callback(isolate, gc_type, flags, info.data);
    1893             :     }
    1894             :   }
    1895      125265 : }
    1896             : 
    1897      125265 : void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
    1898             :   RuntimeCallTimerScope runtime_timer(
    1899      125265 :       isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
    1900      375860 :   for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
    1901      125330 :     if (gc_type & info.gc_type) {
    1902             :       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
    1903       74563 :       info.callback(isolate, gc_type, flags, info.data);
    1904             :     }
    1905             :   }
    1906      125265 : }
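
Both callback lists above use the same dispatch rule: each registration carries a GCType bitmask, and a callback fires only when the bit for the current collection type is set. A simplified sketch with placeholder types, not the V8 API:

#include <vector>

enum SketchGCType { kSketchScavenge = 1 << 0, kSketchMarkCompact = 1 << 1 };

struct SketchCallback {
  int gc_type_mask;                 // Which GC types this callback wants.
  void (*fn)(SketchGCType, void*);  // The registered callback.
  void* data;                       // User data passed back to it.
};

// Invoke only the callbacks whose mask includes the current GC type.
static void Dispatch(const std::vector<SketchCallback>& callbacks,
                     SketchGCType current) {
  for (const SketchCallback& cb : callbacks) {
    if (current & cb.gc_type_mask) cb.fn(current, cb.data);
  }
}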
    1907             : 
    1908             : 
    1909       74510 : void Heap::MarkCompact() {
    1910       74510 :   PauseAllocationObserversScope pause_observers(this);
    1911             : 
    1912             :   SetGCState(MARK_COMPACT);
    1913             : 
    1914      149020 :   LOG(isolate_, ResourceEvent("markcompact", "begin"));
    1915             : 
    1916             :   uint64_t size_of_objects_before_gc = SizeOfObjects();
    1917             : 
    1918      149020 :   CodeSpaceMemoryModificationScope code_modifcation(this);
    1919             : 
    1920       74510 :   mark_compact_collector()->Prepare();
    1921             : 
    1922       74510 :   ms_count_++;
    1923             : 
    1924       74510 :   MarkCompactPrologue();
    1925             : 
    1926       74510 :   mark_compact_collector()->CollectGarbage();
    1927             : 
    1928      149020 :   LOG(isolate_, ResourceEvent("markcompact", "end"));
    1929             : 
    1930       74510 :   MarkCompactEpilogue();
    1931             : 
    1932       74510 :   if (FLAG_allocation_site_pretenuring) {
    1933       74510 :     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
    1934       74510 :   }
    1935       74510 : }
    1936             : 
    1937           0 : void Heap::MinorMarkCompact() {
    1938             : #ifdef ENABLE_MINOR_MC
    1939             :   DCHECK(FLAG_minor_mc);
    1940             : 
    1941           0 :   PauseAllocationObserversScope pause_observers(this);
    1942             :   SetGCState(MINOR_MARK_COMPACT);
    1943           0 :   LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
    1944             : 
    1945           0 :   TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
    1946             :   AlwaysAllocateScope always_allocate(isolate());
    1947             :   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
    1948             :       incremental_marking());
    1949           0 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    1950             : 
    1951           0 :   minor_mark_compact_collector()->CollectGarbage();
    1952             : 
    1953           0 :   LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
    1954           0 :   SetGCState(NOT_IN_GC);
    1955             : #else
    1956             :   UNREACHABLE();
    1957             : #endif  // ENABLE_MINOR_MC
    1958           0 : }
    1959             : 
    1960       74510 : void Heap::MarkCompactEpilogue() {
    1961      298040 :   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
    1962             :   SetGCState(NOT_IN_GC);
    1963             : 
    1964      149020 :   isolate_->counters()->objs_since_last_full()->Set(0);
    1965             : 
    1966       74510 :   incremental_marking()->Epilogue();
    1967             : 
    1968       74510 :   DCHECK(incremental_marking()->IsStopped());
    1969       74510 : }
    1970             : 
    1971             : 
    1972       74510 : void Heap::MarkCompactPrologue() {
    1973      298040 :   TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
    1974      149020 :   isolate_->descriptor_lookup_cache()->Clear();
    1975       74510 :   RegExpResultsCache::Clear(string_split_cache());
    1976       74510 :   RegExpResultsCache::Clear(regexp_multiple_cache());
    1977             : 
    1978      149020 :   isolate_->compilation_cache()->MarkCompactPrologue();
    1979             : 
    1980      149020 :   FlushNumberStringCache();
    1981       74510 : }
    1982             : 
    1983             : 
    1984      294000 : void Heap::CheckNewSpaceExpansionCriteria() {
    1985       98000 :   if (FLAG_experimental_new_space_growth_heuristic) {
    1986           0 :     if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
    1987           0 :         survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
    1988             :       // Grow the size of new space if there is room to grow, and at least 10%
    1989             :       // of the current capacity survived the last scavenge.
    1990           0 :       new_space_->Grow();
    1991           0 :       survived_since_last_expansion_ = 0;
    1992             :     }
    1993      290977 :   } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
    1994       94977 :              survived_since_last_expansion_ > new_space_->TotalCapacity()) {
    1995             :     // Grow the size of new space if there is room to grow, and enough data
    1996             :     // has survived scavenge since the last expansion.
    1997        1851 :     new_space_->Grow();
    1998        1851 :     survived_since_last_expansion_ = 0;
    1999             :   }
    2000       98000 :   new_lo_space()->SetCapacity(new_space()->Capacity());
    2001       98000 : }
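To make the two growth criteria above concrete, a worked example with purely illustrative numbers:

// Example (illustrative numbers): with TotalCapacity() == 4 MB and
// MaximumCapacity() == 16 MB there is room to grow. The default heuristic
// grows new space once survived_since_last_expansion_ exceeds 4 MB in total;
// the experimental heuristic grows as soon as at least 10% of the current
// capacity (~0.4 MB here) survived the most recent scavenge.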
    2002             : 
    2003           0 : void Heap::EvacuateYoungGeneration() {
    2004           0 :   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
    2005           0 :   base::MutexGuard guard(relocation_mutex());
    2006           0 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    2007             :   if (!FLAG_concurrent_marking) {
    2008             :     DCHECK(fast_promotion_mode_);
    2009             :     DCHECK(
    2010             :         CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()));
    2011             :   }
    2012             : 
    2013           0 :   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    2014             : 
    2015             :   SetGCState(SCAVENGE);
    2016           0 :   LOG(isolate_, ResourceEvent("scavenge", "begin"));
    2017             : 
    2018             :   // Move pages from new->old generation.
    2019             :   PageRange range(new_space()->first_allocatable_address(), new_space()->top());
    2020           0 :   for (auto it = range.begin(); it != range.end();) {
    2021             :     Page* p = (*++it)->prev_page();
    2022           0 :     new_space()->from_space().RemovePage(p);
    2023           0 :     Page::ConvertNewToOld(p);
    2024           0 :     if (incremental_marking()->IsMarking())
    2025           0 :       mark_compact_collector()->RecordLiveSlotsOnPage(p);
    2026             :   }
    2027             : 
    2028             :   // Reset new space.
    2029           0 :   if (!new_space()->Rebalance()) {
    2030           0 :     FatalProcessOutOfMemory("NewSpace::Rebalance");
    2031             :   }
    2032           0 :   new_space()->ResetLinearAllocationArea();
    2033             :   new_space()->set_age_mark(new_space()->top());
    2034             : 
    2035           0 :   for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
    2036             :     LargePage* page = *it;
    2037             :     // Increment has to happen after we save the page, because it is going to
    2038             :     // be removed below.
    2039             :     it++;
    2040           0 :     lo_space()->PromoteNewLargeObject(page);
    2041             :   }
    2042             : 
    2043             :   // Fix up special trackers.
    2044           0 :   external_string_table_.PromoteYoung();
    2045             :   // GlobalHandles are updated in PostGarbageCollectionProcessing.
    2046             : 
    2047           0 :   size_t promoted = new_space()->Size() + new_lo_space()->Size();
    2048             :   IncrementYoungSurvivorsCounter(promoted);
    2049             :   IncrementPromotedObjectsSize(promoted);
    2050             :   IncrementSemiSpaceCopiedObjectSize(0);
    2051             : 
    2052           0 :   LOG(isolate_, ResourceEvent("scavenge", "end"));
    2053           0 :   SetGCState(NOT_IN_GC);
    2054           0 : }
    2055             : 
    2056      117450 : void Heap::Scavenge() {
    2057       93960 :   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
    2058       23490 :   base::MutexGuard guard(relocation_mutex());
    2059       46980 :   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
    2060             :   // There are soft limits in the allocation code, designed to trigger a
    2061             :   // mark-sweep collection by failing allocations. There is no sense in trying
    2062             :   // to trigger one during a scavenge: scavenge allocations should always succeed.
    2063             :   AlwaysAllocateScope scope(isolate());
    2064             : 
    2065             :   // Bump-pointer allocations done during scavenge are not real allocations.
    2066             :   // Pause the inline allocation steps.
    2067       46980 :   PauseAllocationObserversScope pause_observers(this);
    2068             :   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
    2069             :       incremental_marking());
    2070             : 
    2071             : 
    2072       23490 :   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    2073             : 
    2074             :   SetGCState(SCAVENGE);
    2075             : 
    2076             :   // Flip the semispaces.  After flipping, to space is empty, from space has
    2077             :   // live objects.
    2078       23490 :   new_space()->Flip();
    2079       23490 :   new_space()->ResetLinearAllocationArea();
    2080             : 
    2081             :   // We also flip the young generation large object space. All large objects
    2082             :   // will be in the from space.
    2083       23490 :   new_lo_space()->Flip();
    2084             :   new_lo_space()->ResetPendingObject();
    2085             : 
    2086             :   // Implements Cheney's copying algorithm
    2087       46980 :   LOG(isolate_, ResourceEvent("scavenge", "begin"));
    2088             : 
    2089       23490 :   scavenger_collector_->CollectGarbage();
    2090             : 
    2091       46980 :   LOG(isolate_, ResourceEvent("scavenge", "end"));
    2092             : 
    2093       23490 :   SetGCState(NOT_IN_GC);
    2094       23490 : }
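The actual copying work is delegated to scavenger_collector_; the comment above only names the underlying technique. As a reminder of what Cheney-style semispace copying does, a minimal self-contained sketch over a toy object model (Obj, Evacuate and CheneyCollect are illustrative names, not V8 types):

#include <cstddef>
#include <vector>

// Toy object: a forwarding pointer plus a fixed number of outgoing pointers.
struct Obj {
  Obj* forward = nullptr;   // set once the object has been copied
  size_t num_fields = 0;
  Obj* fields[4] = {};      // outgoing pointers (fixed-size toy layout)
};

// Copy |obj| into to-space (bump-allocating at |free_ptr|) unless it already
// carries a forwarding pointer; mirrors the forwarding-address checks used by
// the scavenger.
static Obj* Evacuate(Obj* obj, Obj*& free_ptr) {
  if (obj == nullptr) return nullptr;
  if (obj->forward != nullptr) return obj->forward;
  Obj* copy = free_ptr++;
  *copy = *obj;
  obj->forward = copy;
  return copy;
}

void CheneyCollect(std::vector<Obj*>& roots, Obj* to_space) {
  Obj* scan = to_space;
  Obj* free_ptr = to_space;
  for (Obj*& root : roots) root = Evacuate(root, free_ptr);
  // Objects between scan and free_ptr are copied but not yet visited;
  // processing them breadth-first copies the whole reachable graph.
  while (scan < free_ptr) {
    for (size_t i = 0; i < scan->num_fields; i++) {
      scan->fields[i] = Evacuate(scan->fields[i], free_ptr);
    }
    ++scan;
  }
}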
    2095             : 
    2096       98000 : void Heap::ComputeFastPromotionMode() {
    2097             :   const size_t survived_in_new_space =
    2098      196000 :       survived_last_scavenge_ * 100 / new_space_->Capacity();
    2099             :   fast_promotion_mode_ =
    2100      196000 :       !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
    2101       98000 :       !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
    2102       98000 :       survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
    2103       98000 :   if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    2104             :     PrintIsolate(
    2105             :         isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
    2106           0 :         fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
    2107             :   }
    2108       98000 : }
    2109             : 
    2110     2025078 : void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
    2111     2025078 :   if (unprotected_memory_chunks_registry_enabled_) {
    2112     1808823 :     base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
    2113     1808823 :     if (unprotected_memory_chunks_.insert(chunk).second) {
    2114     1803023 :       chunk->SetReadAndWritable();
    2115             :     }
    2116             :   }
    2117     2025079 : }
    2118             : 
    2119     1893278 : void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object) {
    2120     1937209 :   UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object));
    2121     1893279 : }
    2122             : 
    2123      133742 : void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
    2124             :   unprotected_memory_chunks_.erase(chunk);
    2125      133742 : }
    2126             : 
    2127     1758364 : void Heap::ProtectUnprotectedMemoryChunks() {
    2128             :   DCHECK(unprotected_memory_chunks_registry_enabled_);
    2129     5319752 :   for (auto chunk = unprotected_memory_chunks_.begin();
    2130             :        chunk != unprotected_memory_chunks_.end(); chunk++) {
    2131     3606048 :     CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
    2132     1803024 :     (*chunk)->SetDefaultCodePermissions();
    2133             :   }
    2134             :   unprotected_memory_chunks_.clear();
    2135     1758363 : }
    2136             : 
    2137           0 : bool Heap::ExternalStringTable::Contains(String string) {
    2138           0 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    2139           0 :     if (young_strings_[i] == string) return true;
    2140             :   }
    2141           0 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    2142           0 :     if (old_strings_[i] == string) return true;
    2143             :   }
    2144             :   return false;
    2145             : }
    2146             : 
    2147       86458 : void Heap::UpdateExternalString(String string, size_t old_payload,
    2148             :                                 size_t new_payload) {
    2149             :   DCHECK(string->IsExternalString());
    2150             :   Page* page = Page::FromHeapObject(string);
    2151             : 
    2152       86458 :   if (old_payload > new_payload) {
    2153             :     page->DecrementExternalBackingStoreBytes(
    2154          17 :         ExternalBackingStoreType::kExternalString, old_payload - new_payload);
    2155             :   } else {
    2156             :     page->IncrementExternalBackingStoreBytes(
    2157       86441 :         ExternalBackingStoreType::kExternalString, new_payload - old_payload);
    2158             :   }
    2159       86458 : }
    2160             : 
    2161         127 : String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
    2162             :                                                             FullObjectSlot p) {
    2163             :   HeapObject obj = HeapObject::cast(*p);
    2164             :   MapWord first_word = obj->map_word();
    2165             : 
    2166         127 :   String new_string;
    2167             : 
    2168         127 :   if (InFromPage(obj)) {
    2169         127 :     if (!first_word.IsForwardingAddress()) {
    2170             :       // An unreachable external string can be finalized.
    2171         124 :       String string = String::cast(obj);
    2172         124 :       if (!string->IsExternalString()) {
    2173             :         // Original external string has been internalized.
    2174             :         DCHECK(string->IsThinString());
    2175           5 :         return String();
    2176             :       }
    2177         119 :       heap->FinalizeExternalString(string);
    2178         119 :       return String();
    2179             :     }
    2180           3 :     new_string = String::cast(first_word.ToForwardingAddress());
    2181             :   } else {
    2182           0 :     new_string = String::cast(obj);
    2183             :   }
    2184             : 
    2185             :   // String is still reachable.
    2186           3 :   if (new_string->IsThinString()) {
    2187             :     // Filter ThinStrings out of the external string table.
    2188           0 :     return String();
    2189           3 :   } else if (new_string->IsExternalString()) {
    2190             :     MemoryChunk::MoveExternalBackingStoreBytes(
    2191             :         ExternalBackingStoreType::kExternalString,
    2192             :         Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
    2193           6 :         ExternalString::cast(new_string)->ExternalPayloadSize());
    2194           3 :     return new_string;
    2195             :   }
    2196             : 
    2197             :   // Internalization can replace external strings with non-external strings.
    2198           0 :   return new_string->IsExternalString() ? new_string : String();
    2199             : }
    2200             : 
    2201           0 : void Heap::ExternalStringTable::VerifyYoung() {
    2202             : #ifdef DEBUG
    2203             :   std::set<String> visited_map;
    2204             :   std::map<MemoryChunk*, size_t> size_map;
    2205             :   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
    2206             :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    2207             :     String obj = String::cast(young_strings_[i]);
    2208             :     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
    2209             :     DCHECK(mc->InYoungGeneration());
    2210             :     DCHECK(heap_->InYoungGeneration(obj));
    2211             :     DCHECK(!obj->IsTheHole(heap_->isolate()));
    2212             :     DCHECK(obj->IsExternalString());
    2213             :     // Note: we can have repeated elements in the table.
    2214             :     DCHECK_EQ(0, visited_map.count(obj));
    2215             :     visited_map.insert(obj);
    2216             :     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
    2217             :   }
    2218             :   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
    2219             :        it != size_map.end(); it++)
    2220             :     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
    2221             : #endif
    2222           0 : }
    2223             : 
    2224           0 : void Heap::ExternalStringTable::Verify() {
    2225             : #ifdef DEBUG
    2226             :   std::set<String> visited_map;
    2227             :   std::map<MemoryChunk*, size_t> size_map;
    2228             :   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
    2229             :   VerifyYoung();
    2230             :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    2231             :     String obj = String::cast(old_strings_[i]);
    2232             :     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
    2233             :     DCHECK(!mc->InYoungGeneration());
    2234             :     DCHECK(!heap_->InYoungGeneration(obj));
    2235             :     DCHECK(!obj->IsTheHole(heap_->isolate()));
    2236             :     DCHECK(obj->IsExternalString());
    2237             :     // Note: we can have repeated elements in the table.
    2238             :     DCHECK_EQ(0, visited_map.count(obj));
    2239             :     visited_map.insert(obj);
    2240             :     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
    2241             :   }
    2242             :   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
    2243             :        it != size_map.end(); it++)
    2244             :     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
    2245             : #endif
    2246           0 : }
    2247             : 
    2248       98000 : void Heap::ExternalStringTable::UpdateYoungReferences(
    2249             :     Heap::ExternalStringTableUpdaterCallback updater_func) {
    2250      196000 :   if (young_strings_.empty()) return;
    2251             : 
    2252             :   FullObjectSlot start(&young_strings_[0]);
    2253             :   FullObjectSlot end(&young_strings_[young_strings_.size()]);
    2254             :   FullObjectSlot last = start;
    2255             : 
    2256         183 :   for (FullObjectSlot p = start; p < end; ++p) {
    2257         147 :     String target = updater_func(heap_, p);
    2258             : 
    2259         271 :     if (target.is_null()) continue;
    2260             : 
    2261             :     DCHECK(target->IsExternalString());
    2262             : 
    2263          23 :     if (InYoungGeneration(target)) {
    2264             :       // String is still in new space. Update the table entry.
    2265             :       last.store(target);
    2266             :       ++last;
    2267             :     } else {
    2268             :       // String got promoted. Move it to the old string list.
    2269           0 :       old_strings_.push_back(target);
    2270             :     }
    2271             :   }
    2272             : 
    2273             :   DCHECK(last <= end);
    2274          18 :   young_strings_.resize(last - start);
    2275             : #ifdef VERIFY_HEAP
    2276             :   if (FLAG_verify_heap) {
    2277             :     VerifyYoung();
    2278             :   }
    2279             : #endif
    2280             : }
    2281             : 
    2282           0 : void Heap::ExternalStringTable::PromoteYoung() {
    2283           0 :   old_strings_.reserve(old_strings_.size() + young_strings_.size());
    2284             :   std::move(std::begin(young_strings_), std::end(young_strings_),
    2285             :             std::back_inserter(old_strings_));
    2286             :   young_strings_.clear();
    2287           0 : }
    2288             : 
    2289       76203 : void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
    2290       76203 :   if (!young_strings_.empty()) {
    2291             :     v->VisitRootPointers(
    2292             :         Root::kExternalStringsTable, nullptr,
    2293             :         FullObjectSlot(&young_strings_[0]),
    2294          62 :         FullObjectSlot(&young_strings_[young_strings_.size()]));
    2295             :   }
    2296       76203 : }
    2297             : 
    2298       76203 : void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
    2299       76203 :   IterateYoung(v);
    2300       76203 :   if (!old_strings_.empty()) {
    2301             :     v->VisitRootPointers(
    2302             :         Root::kExternalStringsTable, nullptr,
    2303             :         FullObjectSlot(old_strings_.data()),
    2304      228444 :         FullObjectSlot(old_strings_.data() + old_strings_.size()));
    2305             :   }
    2306       76203 : }
    2307             : 
    2308       23490 : void Heap::UpdateYoungReferencesInExternalStringTable(
    2309             :     ExternalStringTableUpdaterCallback updater_func) {
    2310       23490 :   external_string_table_.UpdateYoungReferences(updater_func);
    2311       23490 : }
    2312             : 
    2313       74510 : void Heap::ExternalStringTable::UpdateReferences(
    2314             :     Heap::ExternalStringTableUpdaterCallback updater_func) {
    2315      149020 :   if (old_strings_.size() > 0) {
    2316             :     FullObjectSlot start(old_strings_.data());
    2317       74400 :     FullObjectSlot end(old_strings_.data() + old_strings_.size());
    2318      258241 :     for (FullObjectSlot p = start; p < end; ++p)
    2319      218882 :       p.store(updater_func(heap_, p));
    2320             :   }
    2321             : 
    2322       74510 :   UpdateYoungReferences(updater_func);
    2323       74510 : }
    2324             : 
    2325       74510 : void Heap::UpdateReferencesInExternalStringTable(
    2326             :     ExternalStringTableUpdaterCallback updater_func) {
    2327       74510 :   external_string_table_.UpdateReferences(updater_func);
    2328       74510 : }
    2329             : 
    2330             : 
    2331       74510 : void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
    2332             :   ProcessNativeContexts(retainer);
    2333             :   ProcessAllocationSites(retainer);
    2334       74510 : }
    2335             : 
    2336             : 
    2337       23490 : void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
    2338             :   ProcessNativeContexts(retainer);
    2339       23490 : }
    2340             : 
    2341             : 
    2342           0 : void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
    2343       98000 :   Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
    2344             :   // Update the head of the list of contexts.
    2345             :   set_native_contexts_list(head);
    2346           0 : }
    2347             : 
    2348             : 
    2349           0 : void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
    2350             :   Object allocation_site_obj =
    2351       74510 :       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
    2352             :   set_allocation_sites_list(allocation_site_obj);
    2353           0 : }
    2354             : 
    2355       74510 : void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
    2356       74510 :   set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
    2357       74510 :   set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
    2358       74510 : }
    2359             : 
    2360         306 : void Heap::ForeachAllocationSite(
    2361             :     Object list, const std::function<void(AllocationSite)>& visitor) {
    2362             :   DisallowHeapAllocation disallow_heap_allocation;
    2363         306 :   Object current = list;
    2364        2000 :   while (current->IsAllocationSite()) {
    2365             :     AllocationSite site = AllocationSite::cast(current);
    2366        1388 :     visitor(site);
    2367        1388 :     Object current_nested = site->nested_site();
    2368        2804 :     while (current_nested->IsAllocationSite()) {
    2369             :       AllocationSite nested_site = AllocationSite::cast(current_nested);
    2370          28 :       visitor(nested_site);
    2371          28 :       current_nested = nested_site->nested_site();
    2372             :     }
    2373        1388 :     current = site->weak_next();
    2374             :   }
    2375         306 : }
    2376             : 
    2377         141 : void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
    2378             :   DisallowHeapAllocation no_allocation_scope;
    2379         141 :   bool marked = false;
    2380             : 
    2381             :   ForeachAllocationSite(allocation_sites_list(),
    2382         961 :                         [&marked, flag, this](AllocationSite site) {
    2383         961 :                           if (site->GetPretenureMode() == flag) {
    2384           0 :                             site->ResetPretenureDecision();
    2385             :                             site->set_deopt_dependent_code(true);
    2386           0 :                             marked = true;
    2387           0 :                             RemoveAllocationSitePretenuringFeedback(site);
    2388         961 :                             return;
    2389             :                           }
    2390         282 :                         });
    2391         141 :   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    2392         141 : }
    2393             : 
    2394             : 
    2395       74510 : void Heap::EvaluateOldSpaceLocalPretenuring(
    2396             :     uint64_t size_of_objects_before_gc) {
    2397             :   uint64_t size_of_objects_after_gc = SizeOfObjects();
    2398             :   double old_generation_survival_rate =
    2399       74510 :       (static_cast<double>(size_of_objects_after_gc) * 100) /
    2400       74510 :       static_cast<double>(size_of_objects_before_gc);
    2401             : 
    2402       74510 :   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    2403             :     // Too many objects died in the old generation; pretenuring of the wrong
    2404             :     // allocation sites may be the cause. We have to deopt all dependent code
    2405             :     // registered in the allocation sites to re-evaluate our pretenuring
    2406             :     // decisions.
    2407         141 :     ResetAllAllocationSitesDependentCode(TENURED);
    2408         141 :     if (FLAG_trace_pretenuring) {
    2409             :       PrintF(
    2410             :           "Deopt all allocation sites dependent code due to low survival "
    2411             :           "rate in the old generation %f\n",
    2412           0 :           old_generation_survival_rate);
    2413             :     }
    2414             :   }
    2415       74510 : }
    2416             : 
    2417             : 
    2418           5 : void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
    2419             :   DisallowHeapAllocation no_allocation;
    2420             :   // All external strings are listed in the external string table.
    2421             : 
    2422           0 :   class ExternalStringTableVisitorAdapter : public RootVisitor {
    2423             :    public:
    2424             :     explicit ExternalStringTableVisitorAdapter(
    2425             :         Isolate* isolate, v8::ExternalResourceVisitor* visitor)
    2426           5 :         : isolate_(isolate), visitor_(visitor) {}
    2427           5 :     void VisitRootPointers(Root root, const char* description,
    2428             :                            FullObjectSlot start, FullObjectSlot end) override {
    2429          35 :       for (FullObjectSlot p = start; p < end; ++p) {
    2430             :         DCHECK((*p)->IsExternalString());
    2431             :         visitor_->VisitExternalString(
    2432          50 :             Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
    2433             :       }
    2434           5 :     }
    2435             : 
    2436             :    private:
    2437             :     Isolate* isolate_;
    2438             :     v8::ExternalResourceVisitor* visitor_;
    2439             :   } external_string_table_visitor(isolate(), visitor);
    2440             : 
    2441           5 :   external_string_table_.IterateAll(&external_string_table_visitor);
    2442           5 : }
    2443             : 
    2444             : STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
    2445             :               0);  // NOLINT
    2446             : STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
    2447             :               0);  // NOLINT
    2448             : #ifdef V8_HOST_ARCH_32_BIT
    2449             : STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
    2450             :               0);  // NOLINT
    2451             : #endif
    2452             : 
    2453             : 
    2454          25 : int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
    2455          25 :   switch (alignment) {
    2456             :     case kWordAligned:
    2457             :       return 0;
    2458             :     case kDoubleAligned:
    2459             :     case kDoubleUnaligned:
    2460             :       return kDoubleSize - kTaggedSize;
    2461             :     default:
    2462           0 :       UNREACHABLE();
    2463             :   }
    2464             :   return 0;
    2465             : }
    2466             : 
    2467             : 
    2468    89214746 : int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
    2469    89214746 :   if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
    2470             :     return kTaggedSize;
    2471             :   if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
    2472             :     return kDoubleSize - kTaggedSize;  // No fill if double is always aligned.
    2473             :   return 0;
    2474             : }
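Worked through the branches above, assuming a configuration where kTaggedSize == 4 and kDoubleSize == 8 (so kDoubleAlignmentMask == 7); on configurations where tagged values are already 8-byte aligned no fill is ever required:

// GetFillToAlign(0x1000C, kDoubleAligned)   == 4   pad up to the next 8-byte boundary
// GetFillToAlign(0x10008, kDoubleAligned)   == 0   already 8-byte aligned
// GetFillToAlign(0x10008, kDoubleUnaligned) == 4   push the payload off the boundary
// GetFillToAlign(0x1000C, kWordAligned)     == 0   no extra constraint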
    2475             : 
    2476           0 : HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
    2477           0 :   CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
    2478           0 :   return HeapObject::FromAddress(object->address() + filler_size);
    2479             : }
    2480             : 
    2481           0 : HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
    2482             :                                  int allocation_size,
    2483             :                                  AllocationAlignment alignment) {
    2484           0 :   int filler_size = allocation_size - object_size;
    2485             :   DCHECK_LT(0, filler_size);
    2486             :   int pre_filler = GetFillToAlign(object->address(), alignment);
    2487           0 :   if (pre_filler) {
    2488           0 :     object = PrecedeWithFiller(object, pre_filler);
    2489           0 :     filler_size -= pre_filler;
    2490             :   }
    2491           0 :   if (filler_size) {
    2492             :     CreateFillerObjectAt(object->address() + object_size, filler_size,
    2493           0 :                          ClearRecordedSlots::kNo);
    2494             :   }
    2495           0 :   return object;
    2496             : }
    2497             : 
    2498      405977 : void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) {
    2499      405977 :   ArrayBufferTracker::RegisterNew(this, buffer);
    2500      405981 : }
    2501             : 
    2502        5351 : void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
    2503        5351 :   ArrayBufferTracker::Unregister(this, buffer);
    2504        5351 : }
    2505             : 
    2506       98000 : void Heap::ConfigureInitialOldGenerationSize() {
    2507      105306 :   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
    2508             :     const size_t new_limit =
    2509        7276 :         Max(OldGenerationSizeOfObjects() +
    2510             :                 heap_controller()->MinimumAllocationLimitGrowingStep(
    2511       14552 :                     CurrentHeapGrowingMode()),
    2512             :             static_cast<size_t>(
    2513       14552 :                 static_cast<double>(old_generation_allocation_limit_) *
    2514       21828 :                 (tracer()->AverageSurvivalRatio() / 100)));
    2515        7276 :     if (new_limit < old_generation_allocation_limit_) {
    2516        6611 :       old_generation_allocation_limit_ = new_limit;
    2517             :     } else {
    2518         665 :       old_generation_size_configured_ = true;
    2519             :     }
    2520             :   }
    2521       98000 : }
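Worked through with illustrative numbers, the limit update above behaves as follows:

// Example (illustrative): current limit 128 MB, 20 MB of live old-generation
// objects, an 8 MB minimum growing step, average survival ratio 40%.
//   new_limit = Max(20 MB + 8 MB, 128 MB * 0.40) = Max(28 MB, 51.2 MB) = 51.2 MB
// 51.2 MB < 128 MB, so the allocation limit is lowered. The size only counts
// as configured once the computed limit reaches the current one.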
    2522             : 
    2523       74510 : void Heap::FlushNumberStringCache() {
    2524             :   // Flush the number to string cache.
    2525      149020 :   int len = number_string_cache()->length();
    2526   333050638 :   for (int i = 0; i < len; i++) {
    2527   332976128 :     number_string_cache()->set_undefined(i);
    2528             :   }
    2529       74510 : }
    2530             : 
    2531    89654357 : HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
    2532             :                                       ClearRecordedSlots clear_slots_mode,
    2533             :                                       ClearFreedMemoryMode clear_memory_mode) {
    2534    89654357 :   if (size == 0) return HeapObject();
    2535    88502700 :   HeapObject filler = HeapObject::FromAddress(addr);
    2536    88502700 :   if (size == kTaggedSize) {
    2537             :     filler->set_map_after_allocation(
    2538             :         Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
    2539     3282646 :         SKIP_WRITE_BARRIER);
    2540    85220054 :   } else if (size == 2 * kTaggedSize) {
    2541             :     filler->set_map_after_allocation(
    2542             :         Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
    2543     3773719 :         SKIP_WRITE_BARRIER);
    2544     3773720 :     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
    2545       13486 :       Memory<Tagged_t>(addr + kTaggedSize) =
    2546       13486 :           static_cast<Tagged_t>(kClearedFreeMemoryValue);
    2547             :     }
    2548             :   } else {
    2549             :     DCHECK_GT(size, 2 * kTaggedSize);
    2550             :     filler->set_map_after_allocation(
    2551             :         Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
    2552    81446335 :         SKIP_WRITE_BARRIER);
    2553             :     FreeSpace::cast(filler)->relaxed_write_size(size);
    2554    81428178 :     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
    2555             :       MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
    2556      262538 :                    (size / kTaggedSize) - 2);
    2557             :     }
    2558             :   }
    2559    88472698 :   if (clear_slots_mode == ClearRecordedSlots::kYes) {
    2560     1938354 :     ClearRecordedSlotRange(addr, addr + size);
    2561             :   }
    2562             : 
    2563             :   // At this point, we may be deserializing the heap from a snapshot, and
    2564             :   // none of the maps have been created yet; they are still nullptr.
    2565             :   DCHECK((filler->map_slot().contains_value(kNullAddress) &&
    2566             :           !deserialization_complete_) ||
    2567             :          filler->map()->IsMap());
    2568    88472698 :   return filler;
    2569             : }
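The filler chosen above depends only on the size of the hole; restating the branches (sizes in tagged words):

// 1 word      -> one-pointer filler map (the map alone encodes the size)
// 2 words     -> two-pointer filler map
// >= 3 words  -> free-space map plus an explicit size field, so heap iterators
//                and the sweeper can skip the hole in one step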
    2570             : 
    2571      181742 : bool Heap::CanMoveObjectStart(HeapObject object) {
    2572      181742 :   if (!FLAG_move_object_start) return false;
    2573             : 
    2574             :   // Sampling heap profiler may have a reference to the object.
    2575      363484 :   if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
    2576             : 
    2577      181742 :   if (IsLargeObject(object)) return false;
    2578             : 
    2579             :   // We can move the object start if the page was already swept.
    2580      181728 :   return Page::FromHeapObject(object)->SweepingDone();
    2581             : }
    2582             : 
    2583       43929 : bool Heap::IsImmovable(HeapObject object) {
    2584             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    2585      175731 :   return chunk->NeverEvacuate() || IsLargeObject(object);
    2586             : }
    2587             : 
    2588      809434 : bool Heap::IsLargeObject(HeapObject object) {
    2589     3257819 :   return MemoryChunk::FromHeapObject(object)->IsLargePage();
    2590             : }
    2591             : 
    2592             : #ifdef ENABLE_SLOW_DCHECKS
    2593             : namespace {
    2594             : 
    2595             : class LeftTrimmerVerifierRootVisitor : public RootVisitor {
    2596             :  public:
    2597             :   explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
    2598             :       : to_check_(to_check) {}
    2599             : 
    2600             :   void VisitRootPointers(Root root, const char* description,
    2601             :                          FullObjectSlot start, FullObjectSlot end) override {
    2602             :     for (FullObjectSlot p = start; p < end; ++p) {
    2603             :       DCHECK_NE(*p, to_check_);
    2604             :     }
    2605             :   }
    2606             : 
    2607             :  private:
    2608             :   FixedArrayBase to_check_;
    2609             : 
    2610             :   DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
    2611             : };
    2612             : }  // namespace
    2613             : #endif  // ENABLE_SLOW_DCHECKS
    2614             : 
    2615             : namespace {
    2616       57671 : bool MayContainRecordedSlots(HeapObject object) {
    2617             :   // New space objects do not have recorded slots.
    2618      115342 :   if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
    2619             :   // Whitelist objects that definitely do not have pointers.
    2620        4683 :   if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
    2621             :   // Conservatively return true for other objects.
    2622        2341 :   return true;
    2623             : }
    2624             : }  // namespace
    2625             : 
    2626      668878 : void Heap::OnMoveEvent(HeapObject target, HeapObject source,
    2627             :                        int size_in_bytes) {
    2628      670351 :   HeapProfiler* heap_profiler = isolate_->heap_profiler();
    2629      668878 :   if (heap_profiler->is_tracking_object_moves()) {
    2630             :     heap_profiler->ObjectMoveEvent(source->address(), target->address(),
    2631      173529 :                                    size_in_bytes);
    2632             :   }
    2633     1498973 :   for (auto& tracker : allocation_trackers_) {
    2634      323094 :     tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
    2635             :   }
    2636      668669 :   if (target->IsSharedFunctionInfo()) {
    2637        4367 :     LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
    2638             :                                                          target->address()));
    2639             :   }
    2640             : 
    2641             :   if (FLAG_verify_predictable) {
    2642             :     ++allocations_count_;
    2643             :     // Advance synthetic time by making a time request.
    2644             :     MonotonicallyIncreasingTimeInMs();
    2645             : 
    2646             :     UpdateAllocationsHash(source);
    2647             :     UpdateAllocationsHash(target);
    2648             :     UpdateAllocationsHash(size_in_bytes);
    2649             : 
    2650             :     if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
    2651             :       PrintAllocationsHash();
    2652             :     }
    2653      668740 :   } else if (FLAG_fuzzer_gc_analysis) {
    2654           0 :     ++allocations_count_;
    2655             :   }
    2656      668740 : }
    2657             : 
    2658      181760 : FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
    2659             :                                         int elements_to_trim) {
    2660      181760 :   if (elements_to_trim == 0) {
    2661             :     // This simplifies reasoning in the rest of the function.
    2662           0 :     return object;
    2663             :   }
    2664      181760 :   CHECK(!object.is_null());
    2665             :   DCHECK(CanMoveObjectStart(object));
    2666             :   // Add custom visitor to concurrent marker if new left-trimmable type
    2667             :   // is added.
    2668             :   DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
    2669             :   const int element_size = object->IsFixedArray() ? kTaggedSize : kDoubleSize;
    2670      181760 :   const int bytes_to_trim = elements_to_trim * element_size;
    2671             :   Map map = object->map();
    2672             : 
    2673             :   // For now this trick is only applied to fixed arrays which may be in new
    2674             :   // space or old space. In a large object space the object's start must
    2675             :   // coincide with the chunk start, so the trick is not applicable there.
    2676             :   DCHECK(!IsLargeObject(object));
    2677             :   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    2678             : 
    2679             :   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
    2680             :   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
    2681             :   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
    2682             : 
    2683             :   const int len = object->length();
    2684             :   DCHECK(elements_to_trim <= len);
    2685             : 
    2686             :   // Calculate location of new array start.
    2687             :   Address old_start = object->address();
    2688      181760 :   Address new_start = old_start + bytes_to_trim;
    2689             : 
    2690      181760 :   if (incremental_marking()->IsMarking()) {
    2691             :     incremental_marking()->NotifyLeftTrimming(
    2692       20297 :         object, HeapObject::FromAddress(new_start));
    2693             :   }
    2694             : 
    2695             :   // Technically in new space this write might be omitted (except in
    2696             :   // debug mode, which iterates through the heap), but to play it safe
    2697             :   // we still do it.
    2698             :   HeapObject filler =
    2699      181760 :       CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
    2700             : 
    2701             :   // Initialize the header of the trimmed array. Since left trimming is only
    2702             :   // performed on pages which are not concurrently swept, creating a filler
    2703             :   // object does not require synchronization.
    2704      181760 :   RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
    2705      363520 :   RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
    2706             :                       Smi::FromInt(len - elements_to_trim));
    2707             : 
    2708             :   FixedArrayBase new_object =
    2709      181760 :       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
    2710             : 
    2711             :   // Remove recorded slots for the new map and length offset.
    2712             :   ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
    2713             :   ClearRecordedSlot(new_object, HeapObject::RawField(
    2714             :                                     new_object, FixedArrayBase::kLengthOffset));
    2715             : 
    2716             :   // Handle invalidated old-to-old slots.
    2717      181769 :   if (incremental_marking()->IsCompacting() &&
    2718           9 :       MayContainRecordedSlots(new_object)) {
    2719             :     // If the array was right-trimmed before, then it is registered in
    2720             :     // the invalidated_slots.
    2721             :     MemoryChunk::FromHeapObject(new_object)
    2722           5 :         ->MoveObjectWithInvalidatedSlots(filler, new_object);
    2723             :     // We have to clear slots in the free space to avoid stale old-to-old slots.
    2724             :     // Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
    2725             :     // we need pointer-granularity writes to avoid a race with the concurrent
    2726             :     // marker.
    2727           5 :     if (filler->Size() > FreeSpace::kSize) {
    2728             :       MemsetTagged(HeapObject::RawField(filler, FreeSpace::kSize),
    2729             :                    ReadOnlyRoots(this).undefined_value(),
    2730           5 :                    (filler->Size() - FreeSpace::kSize) / kTaggedSize);
    2731             :     }
    2732             :   }
    2733             :   // Notify the heap profiler of change in object layout.
    2734      181760 :   OnMoveEvent(new_object, object, new_object->Size());
    2735             : 
    2736             : #ifdef ENABLE_SLOW_DCHECKS
    2737             :   if (FLAG_enable_slow_asserts) {
    2738             :     // Make sure the stack or other roots (e.g., Handles) don't contain pointers
    2739             :     // to the original FixedArray (which is now the filler object).
    2740             :     LeftTrimmerVerifierRootVisitor root_visitor(object);
    2741             :     ReadOnlyRoots(this).Iterate(&root_visitor);
    2742             :     IterateRoots(&root_visitor, VISIT_ALL);
    2743             :   }
    2744             : #endif  // ENABLE_SLOW_DCHECKS
    2745             : 
    2746      181760 :   return new_object;
    2747             : }
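Sketched on a toy layout, the left trim above turns the old prefix into a filler and writes a fresh header at the new start (addresses and element values are illustrative):

// Before, trimming 2 elements off a FixedArray of length 5:
//   old_start: [ map | length=5 | e0 | e1 | e2 | e3 | e4 ]
// After:
//   old_start: [ two-pointer filler ]                      <- CreateFillerObjectAt
//   new_start: [ map | length=3 | e2 | e3 | e4 ]           <- returned new_object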
    2748             : 
    2749     1439427 : void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
    2750             :   const int len = object->length();
    2751             :   DCHECK_LE(elements_to_trim, len);
    2752             :   DCHECK_GE(elements_to_trim, 0);
    2753             : 
    2754             :   int bytes_to_trim;
    2755             :   DCHECK(!object->IsFixedTypedArrayBase());
    2756     1439427 :   if (object->IsByteArray()) {
    2757       10240 :     int new_size = ByteArray::SizeFor(len - elements_to_trim);
    2758       10240 :     bytes_to_trim = ByteArray::SizeFor(len) - new_size;
    2759             :     DCHECK_GE(bytes_to_trim, 0);
    2760     1429187 :   } else if (object->IsFixedArray()) {
    2761     1407888 :     CHECK_NE(elements_to_trim, len);
    2762     1407888 :     bytes_to_trim = elements_to_trim * kTaggedSize;
    2763             :   } else {
    2764             :     DCHECK(object->IsFixedDoubleArray());
    2765       21299 :     CHECK_NE(elements_to_trim, len);
    2766       21299 :     bytes_to_trim = elements_to_trim * kDoubleSize;
    2767             :   }
    2768             : 
    2769     1439427 :   CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
    2770     1439427 : }
    2771             : 
    2772       17782 : void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
    2773             :                                    int elements_to_trim) {
    2774             :   // This function is safe to use only at the end of the mark compact
    2775             :   // collection: When marking, we record the weak slots, and shrinking
    2776             :   // invalidates them.
    2777             :   DCHECK_EQ(gc_state(), MARK_COMPACT);
    2778             :   CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
    2779       17782 :                                        elements_to_trim * kTaggedSize);
    2780       17782 : }
    2781             : 
    2782             : template <typename T>
    2783     1457209 : void Heap::CreateFillerForArray(T object, int elements_to_trim,
    2784             :                                 int bytes_to_trim) {
    2785             :   DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
    2786             :          object->IsWeakFixedArray());
    2787             : 
    2788             :   // For now this trick is only applied to objects in new and paged space.
    2789             :   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
    2790             : 
    2791     1457209 :   if (bytes_to_trim == 0) {
    2792             :     DCHECK_EQ(elements_to_trim, 0);
    2793             :     // No need to create filler and update live bytes counters.
    2794     1457209 :     return;
    2795             :   }
    2796             : 
    2797             :   // Calculate location of new array end.
    2798     1457209 :   int old_size = object->Size();
    2799     1457209 :   Address old_end = object->address() + old_size;
    2800     1457209 :   Address new_end = old_end - bytes_to_trim;
    2801             : 
    2802             :   // Register the array as an object with invalidated old-to-old slots. We
    2803             :   // cannot use NotifyObjectLayoutChange as it would mark the array black,
    2804             :   // which is not safe for left-trimming because left-trimming re-pushes
    2805             :   // only grey arrays onto the marking worklist.
    2806     1460006 :   if (incremental_marking()->IsCompacting() &&
    2807        2797 :       MayContainRecordedSlots(object)) {
    2808             :     // Ensure that the object survives because the InvalidatedSlotsFilter will
    2809             :     // compute its size from its map during pointers updating phase.
    2810          60 :     incremental_marking()->WhiteToGreyAndPush(object);
    2811          60 :     MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
    2812             :         object, old_size);
    2813             :   }
    2814             : 
    2815             :   // Technically in new space this write might be omitted (except in
    2816             :   // debug mode, which iterates through the heap), but to play it safe
    2817             :   // we still do it.
    2818             :   // We do not create a filler for objects in a large object space.
    2819     1457209 :   if (!IsLargeObject(object)) {
    2820             :     HeapObject filler =
    2821     1457128 :         CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
    2822             :     DCHECK(!filler.is_null());
    2823             :     // Clear the mark bits of the black area that now belongs to the filler.
    2824             :     // This is an optimization. The sweeper will release black fillers anyway.
    2825     1581698 :     if (incremental_marking()->black_allocation() &&
    2826             :         incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
    2827         157 :       Page* page = Page::FromAddress(new_end);
    2828         157 :       incremental_marking()->marking_state()->bitmap(page)->ClearRange(
    2829             :           page->AddressToMarkbitIndex(new_end),
    2830         157 :           page->AddressToMarkbitIndex(new_end + bytes_to_trim));
    2831             :     }
    2832             :   }
    2833             : 
    2834             :   // Initialize the header of the trimmed array. We store the new length using
    2835             :   // a release store after creating a filler for the left-over space to avoid
    2836             :   // races with the sweeper thread.
    2837     1457209 :   object->synchronized_set_length(object->length() - elements_to_trim);
    2838             : 
    2839             :   // Notify the heap object allocation trackers of the change in object layout.
    2840             :   // The array may not be moved during GC, but its size still has to be adjusted.
    2841     2917242 :   for (auto& tracker : allocation_trackers_) {
    2842        5648 :     tracker->UpdateObjectSizeEvent(object->address(), object->Size());
    2843             :   }
    2844             : }
    2845             : 
    2846           0 : void Heap::MakeHeapIterable() {
    2847        7585 :   mark_compact_collector()->EnsureSweepingCompleted();
    2848           0 : }
    2849             : 
    2850             : 
    2851             : static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
    2852             :   const double kMinMutatorUtilization = 0.0;
    2853             :   const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
    2854       23545 :   if (mutator_speed == 0) return kMinMutatorUtilization;
    2855       21339 :   if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
    2856             :   // Derivation:
    2857             :   // mutator_utilization = mutator_time / (mutator_time + gc_time)
    2858             :   // mutator_time = 1 / mutator_speed
    2859             :   // gc_time = 1 / gc_speed
    2860             :   // mutator_utilization = (1 / mutator_speed) /
    2861             :   //                       (1 / mutator_speed + 1 / gc_speed)
    2862             :   // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
    2863       21339 :   return gc_speed / (mutator_speed + gc_speed);
    2864             : }
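A quick numeric check of the derivation above (speeds are illustrative):

// mutator_speed = 1,000,000 bytes/ms, gc_speed = 200,000 bytes/ms
//   mutator_utilization = 200,000 / (1,000,000 + 200,000) = 1/6 ≈ 0.167
// i.e. the GC needs five times as long per byte as the mutator needed to
// allocate it, leaving roughly 17% of wall-clock time to the mutator.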
    2865             : 
    2866             : 
    2867       23527 : double Heap::YoungGenerationMutatorUtilization() {
    2868             :   double mutator_speed = static_cast<double>(
    2869       23527 :       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
    2870             :   double gc_speed =
    2871       23527 :       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
    2872             :   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
    2873       23527 :   if (FLAG_trace_mutator_utilization) {
    2874             :     isolate()->PrintWithTimestamp(
    2875             :         "Young generation mutator utilization = %.3f ("
    2876             :         "mutator_speed=%.f, gc_speed=%.f)\n",
    2877           0 :         result, mutator_speed, gc_speed);
    2878             :   }
    2879       23527 :   return result;
    2880             : }
    2881             : 
    2882             : 
    2883          18 : double Heap::OldGenerationMutatorUtilization() {
    2884             :   double mutator_speed = static_cast<double>(
    2885          18 :       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
    2886             :   double gc_speed = static_cast<double>(
    2887          18 :       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
    2888             :   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
    2889          18 :   if (FLAG_trace_mutator_utilization) {
    2890             :     isolate()->PrintWithTimestamp(
    2891             :         "Old generation mutator utilization = %.3f ("
    2892             :         "mutator_speed=%.f, gc_speed=%.f)\n",
    2893           0 :         result, mutator_speed, gc_speed);
    2894             :   }
    2895          18 :   return result;
    2896             : }
    2897             : 
    2898             : 
    2899           0 : bool Heap::HasLowYoungGenerationAllocationRate() {
    2900             :   const double high_mutator_utilization = 0.993;
    2901       23527 :   return YoungGenerationMutatorUtilization() > high_mutator_utilization;
    2902             : }
    2903             : 
    2904             : 
    2905           0 : bool Heap::HasLowOldGenerationAllocationRate() {
    2906             :   const double high_mutator_utilization = 0.993;
    2907          18 :   return OldGenerationMutatorUtilization() > high_mutator_utilization;
    2908             : }
    2909             : 
    2910             : 
    2911          37 : bool Heap::HasLowAllocationRate() {
    2912          55 :   return HasLowYoungGenerationAllocationRate() &&
    2913          37 :          HasLowOldGenerationAllocationRate();
    2914             : }
    2915             : 
    2916           0 : bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
    2917             :                                     double mutator_utilization) {
    2918             :   const double kHighHeapPercentage = 0.8;
    2919             :   const double kLowMutatorUtilization = 0.4;
    2920       73758 :   return old_generation_size >=
    2921       73758 :              kHighHeapPercentage * max_old_generation_size_ &&
    2922           0 :          mutator_utilization < kLowMutatorUtilization;
    2923             : }
    2924             : 
    2925       74510 : void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
    2926             :                                        double mutator_utilization) {
    2927             :   const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
    2928       74510 :   if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
    2929       73758 :   if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
    2930       73746 :     consecutive_ineffective_mark_compacts_ = 0;
    2931       73746 :     return;
    2932             :   }
    2933          12 :   ++consecutive_ineffective_mark_compacts_;
    2934          12 :   if (consecutive_ineffective_mark_compacts_ ==
    2935             :       kMaxConsecutiveIneffectiveMarkCompacts) {
    2936           0 :     if (InvokeNearHeapLimitCallback()) {
    2937             :       // The callback increased the heap limit.
    2938           0 :       consecutive_ineffective_mark_compacts_ = 0;
    2939           0 :       return;
    2940             :     }
    2941           0 :     FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
    2942             :   }
    2943             : }
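                     : 
                     : // Worked example (hypothetical numbers): with max_old_generation_size_ of
                     : // 256 MB, a mark-compact that leaves >= 0.8 * 256 MB = 204.8 MB of old
                     : // generation while mutator utilization stays below 0.4 counts as
                     : // ineffective. Four such GCs in a row first give the near-heap-limit
                     : // callback a chance to raise the limit, and otherwise report OOM.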
    2944             : 
    2945           0 : bool Heap::HasHighFragmentation() {
    2946           0 :   size_t used = OldGenerationSizeOfObjects();
    2947           0 :   size_t committed = CommittedOldGenerationMemory();
    2948           0 :   return HasHighFragmentation(used, committed);
    2949             : }
    2950             : 
    2951           0 : bool Heap::HasHighFragmentation(size_t used, size_t committed) {
    2952             :   const size_t kSlack = 16 * MB;
    2953             :   // Fragmentation is high if committed > 2 * used + kSlack.
    2954             :   // Rewrite the expression to avoid overflow.
    2955             :   DCHECK_GE(committed, used);
    2956       73785 :   return committed - used > used + kSlack;
    2957             : }
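                     : 
                     : // The rewrite above is equivalent to the commented condition whenever
                     : // committed >= used (guaranteed by the DCHECK):
                     : //   committed > 2 * used + kSlack  <=>  committed - used > used + kSlack,
                     : // and the subtraction on the left cannot underflow, so the size_t
                     : // comparison is overflow-free.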
    2958             : 
    2959     1799967 : bool Heap::ShouldOptimizeForMemoryUsage() {
    2960     1799967 :   const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
    2961     3599933 :   return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
    2962     7199855 :          isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
    2963     3599923 :          !CanExpandOldGeneration(kOldGenerationSlack);
    2964             : }
    2965             : 
    2966           0 : void Heap::ActivateMemoryReducerIfNeeded() {
    2967             :   // Activate the memory reducer when switching to the background if:
    2968             :   // - there has been no mark-compact since startup, and
    2969             :   // - the committed memory can potentially be reduced.
    2970             :   // 2 pages each for the old, code, and map space + 1 page for the new space.
    2971             :   const int kMinCommittedMemory = 7 * Page::kPageSize;
    2972           0 :   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
    2973           0 :       isolate()->IsIsolateInBackground()) {
    2974             :     MemoryReducer::Event event;
    2975           0 :     event.type = MemoryReducer::kPossibleGarbage;
    2976           0 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    2977           0 :     memory_reducer_->NotifyPossibleGarbage(event);
    2978             :   }
    2979           0 : }
    2980             : 
    2981      195511 : void Heap::ReduceNewSpaceSize() {
    2982             :   // TODO(ulan): Unify this constant with the similar constant in
    2983             :   // GCIdleTimeHandler once the change is merged to 4.5.
    2984             :   static const size_t kLowAllocationThroughput = 1000;
    2985             :   const double allocation_throughput =
    2986       98000 :       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
    2987             : 
    2988      196000 :   if (FLAG_predictable) return;
    2989             : 
    2990      195022 :   if (ShouldReduceMemory() ||
    2991       68983 :       ((allocation_throughput != 0) &&
    2992             :        (allocation_throughput < kLowAllocationThroughput))) {
    2993       23191 :     new_space_->Shrink();
    2994       46382 :     new_lo_space_->SetCapacity(new_space_->Capacity());
    2995             :     UncommitFromSpace();
    2996             :   }
    2997             : }
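                     : 
                     : // Worked example (hypothetical numbers): a measured allocation throughput of
                     : // 500 bytes/ms is below kLowAllocationThroughput (1000), so the new space and
                     : // the new large-object space are shrunk and unused from-space pages are
                     : // uncommitted.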
    2998             : 
    2999       30295 : void Heap::FinalizeIncrementalMarkingIfComplete(
    3000             :     GarbageCollectionReason gc_reason) {
    3001       90018 :   if (incremental_marking()->IsMarking() &&
    3002       51210 :       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
    3003       10021 :        (!incremental_marking()->finalize_marking_completed() &&
    3004       10023 :         mark_compact_collector()->marking_worklist()->IsEmpty() &&
    3005           2 :         local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
    3006        7648 :     FinalizeIncrementalMarkingIncrementally(gc_reason);
    3007       56310 :   } else if (incremental_marking()->IsComplete() ||
    3008       11885 :              (mark_compact_collector()->marking_worklist()->IsEmpty() &&
    3009             :               local_embedder_heap_tracer()
    3010         869 :                   ->ShouldFinalizeIncrementalMarking())) {
    3011       12500 :     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
    3012             :   }
    3013       30295 : }
    3014             : 
    3015           5 : void Heap::FinalizeIncrementalMarkingAtomically(
    3016             :     GarbageCollectionReason gc_reason) {
    3017             :   DCHECK(!incremental_marking()->IsStopped());
    3018        2704 :   CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
    3019           5 : }
    3020             : 
    3021       18496 : void Heap::FinalizeIncrementalMarkingIncrementally(
    3022             :     GarbageCollectionReason gc_reason) {
    3023       18496 :   if (FLAG_trace_incremental_marking) {
    3024             :     isolate()->PrintWithTimestamp(
    3025             :         "[IncrementalMarking] (%s).\n",
    3026           0 :         Heap::GarbageCollectionReasonToString(gc_reason));
    3027             :   }
    3028             : 
    3029             :   HistogramTimerScope incremental_marking_scope(
    3030       18496 :       isolate()->counters()->gc_incremental_marking_finalize());
    3031       55488 :   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
    3032       73984 :   TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
    3033             : 
    3034             :   {
    3035             :     GCCallbacksScope scope(this);
    3036       18496 :     if (scope.CheckReenter()) {
    3037             :       AllowHeapAllocation allow_allocation;
    3038       73984 :       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
    3039       36992 :       VMState<EXTERNAL> state(isolate_);
    3040       18496 :       HandleScope handle_scope(isolate_);
    3041       36992 :       CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    3042             :     }
    3043             :   }
    3044       18496 :   incremental_marking()->FinalizeIncrementally();
    3045             :   {
    3046             :     GCCallbacksScope scope(this);
    3047       18496 :     if (scope.CheckReenter()) {
    3048             :       AllowHeapAllocation allow_allocation;
    3049       73984 :       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
    3050       36992 :       VMState<EXTERNAL> state(isolate_);
    3051       18496 :       HandleScope handle_scope(isolate_);
    3052       36992 :       CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    3053             :     }
    3054             :   }
    3055       18496 : }
    3056             : 
    3057       89910 : void Heap::RegisterDeserializedObjectsForBlackAllocation(
    3058             :     Reservation* reservations, const std::vector<HeapObject>& large_objects,
    3059             :     const std::vector<Address>& maps) {
    3060             :   // TODO(ulan): pause black allocation during deserialization to avoid
    3061             :   // iterating all these objects in one go.
    3062             : 
    3063      179820 :   if (!incremental_marking()->black_allocation()) return;
    3064             : 
    3065             :   // Iterate black objects in old space, code space, map space, and large
    3066             :   // object space for side effects.
    3067             :   IncrementalMarking::MarkingState* marking_state =
    3068             :       incremental_marking()->marking_state();
    3069       52644 :   for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
    3070       52644 :     const Heap::Reservation& res = reservations[i];
    3071      486752 :     for (auto& chunk : res) {
    3072      381464 :       Address addr = chunk.start;
    3073    16146202 :       while (addr < chunk.end) {
    3074    15383274 :         HeapObject obj = HeapObject::FromAddress(addr);
    3075             :         // Objects can have any color because incremental marking can
    3076             :         // start in the middle of Heap::ReserveSpace().
    3077    15383274 :         if (marking_state->IsBlack(obj)) {
    3078    15383274 :           incremental_marking()->ProcessBlackAllocatedObject(obj);
    3079             :         }
    3080    15383274 :         addr += obj->Size();
    3081             :       }
    3082             :     }
    3083             :   }
    3084             : 
    3085             :   // Large object space doesn't use reservations, so it needs custom handling.
    3086       26337 :   for (HeapObject object : large_objects) {
    3087          15 :     incremental_marking()->ProcessBlackAllocatedObject(object);
    3088             :   }
    3089             : 
    3090             :   // Map space doesn't use reservations, so it needs custom handling.
    3091     3293751 :   for (Address addr : maps) {
    3092             :     incremental_marking()->ProcessBlackAllocatedObject(
    3093     3267429 :         HeapObject::FromAddress(addr));
    3094             :   }
    3095             : }
    3096             : 
    3097    30634106 : void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
    3098             :                                     const DisallowHeapAllocation&) {
    3099    30634106 :   if (incremental_marking()->IsMarking()) {
    3100     3251736 :     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
    3101     3306603 :     if (incremental_marking()->IsCompacting() &&
    3102       54865 :         MayContainRecordedSlots(object)) {
    3103             :       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
    3104        2276 :           object, size);
    3105             :     }
    3106             :   }
    3107             : #ifdef VERIFY_HEAP
    3108             :   if (FLAG_verify_heap) {
    3109             :     DCHECK(pending_layout_change_object_.is_null());
    3110             :     pending_layout_change_object_ = object;
    3111             :   }
    3112             : #endif
    3113    30634108 : }
    3114             : 
    3115             : #ifdef VERIFY_HEAP
    3116             : // Helper class for collecting slot addresses.
    3117             : class SlotCollectingVisitor final : public ObjectVisitor {
    3118             :  public:
    3119             :   void VisitPointers(HeapObject host, ObjectSlot start,
    3120             :                      ObjectSlot end) override {
    3121             :     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    3122             :   }
    3123             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3124             :                      MaybeObjectSlot end) final {
    3125             :     for (MaybeObjectSlot p = start; p < end; ++p) {
    3126             :       slots_.push_back(p);
    3127             :     }
    3128             :   }
    3129             : 
    3130             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
    3131             : 
    3132             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3133             :     UNREACHABLE();
    3134             :   }
    3135             : 
    3136             :   int number_of_slots() { return static_cast<int>(slots_.size()); }
    3137             : 
    3138             :   MaybeObjectSlot slot(int i) { return slots_[i]; }
    3139             : 
    3140             :  private:
    3141             :   std::vector<MaybeObjectSlot> slots_;
    3142             : };
    3143             : 
    3144             : void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
    3145             :   if (!FLAG_verify_heap) return;
    3146             : 
    3147             :   // Check that Heap::NotifyObjectLayoutChange was called for object transitions
    3148             :   // that are not safe for concurrent marking.
    3149             :   // If you see this check triggering for a freshly allocated object,
    3150             :   // use object->set_map_after_allocation() to initialize its map.
    3151             :   if (pending_layout_change_object_.is_null()) {
    3152             :     if (object->IsJSObject()) {
    3153             :       DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
    3154             :     } else {
    3155             :       // Check that the set of slots before and after the transition match.
    3156             :       SlotCollectingVisitor old_visitor;
    3157             :       object->IterateFast(&old_visitor);
    3158             :       MapWord old_map_word = object->map_word();
    3159             :       // Temporarily set the new map to iterate new slots.
    3160             :       object->set_map_word(MapWord::FromMap(new_map));
    3161             :       SlotCollectingVisitor new_visitor;
    3162             :       object->IterateFast(&new_visitor);
    3163             :       // Restore the old map.
    3164             :       object->set_map_word(old_map_word);
    3165             :       DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
    3166             :       for (int i = 0; i < new_visitor.number_of_slots(); i++) {
    3167             :         DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
    3168             :       }
    3169             :     }
    3170             :   } else {
    3171             :     DCHECK_EQ(pending_layout_change_object_, object);
    3172             :     pending_layout_change_object_ = HeapObject();
    3173             :   }
    3174             : }
    3175             : #endif
    3176             : 
    3177         479 : GCIdleTimeHeapState Heap::ComputeHeapState() {
    3178             :   GCIdleTimeHeapState heap_state;
    3179         479 :   heap_state.contexts_disposed = contexts_disposed_;
    3180             :   heap_state.contexts_disposal_rate =
    3181         479 :       tracer()->ContextDisposalRateInMilliseconds();
    3182         479 :   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
    3183         479 :   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
    3184         479 :   return heap_state;
    3185             : }
    3186             : 
    3187             : 
    3188         479 : bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
    3189             :                                  GCIdleTimeHeapState heap_state,
    3190             :                                  double deadline_in_ms) {
    3191             :   bool result = false;
    3192         479 :   switch (action.type) {
    3193             :     case DONE:
    3194             :       result = true;
    3195         135 :       break;
    3196             :     case DO_INCREMENTAL_STEP: {
    3197             :       incremental_marking()->AdvanceWithDeadline(
    3198             :           deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
    3199          47 :           StepOrigin::kTask);
    3200             :       FinalizeIncrementalMarkingIfComplete(
    3201          47 :           GarbageCollectionReason::kFinalizeMarkingViaTask);
    3202             :       result = incremental_marking()->IsStopped();
    3203          47 :       break;
    3204             :     }
    3205             :     case DO_FULL_GC: {
    3206             :       DCHECK_LT(0, contexts_disposed_);
    3207         368 :       HistogramTimerScope scope(isolate_->counters()->gc_context());
    3208         552 :       TRACE_EVENT0("v8", "V8.GCContext");
    3209             :       CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
    3210             :       break;
    3211             :     }
    3212             :     case DO_NOTHING:
    3213             :       break;
    3214             :   }
    3215             : 
    3216         479 :   return result;
    3217             : }
    3218             : 
    3219         479 : void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
    3220             :                                     GCIdleTimeHeapState heap_state,
    3221             :                                     double start_ms, double deadline_in_ms) {
    3222         479 :   double idle_time_in_ms = deadline_in_ms - start_ms;
    3223         479 :   double current_time = MonotonicallyIncreasingTimeInMs();
    3224         479 :   last_idle_notification_time_ = current_time;
    3225         479 :   double deadline_difference = deadline_in_ms - current_time;
    3226             : 
    3227         479 :   contexts_disposed_ = 0;
    3228             : 
    3229         479 :   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
    3230             :       FLAG_trace_idle_notification_verbose) {
    3231             :     isolate_->PrintWithTimestamp(
    3232             :         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
    3233             :         "ms, deadline usage %.2f ms [",
    3234             :         idle_time_in_ms, idle_time_in_ms - deadline_difference,
    3235           0 :         deadline_difference);
    3236           0 :     action.Print();
    3237           0 :     PrintF("]");
    3238           0 :     if (FLAG_trace_idle_notification_verbose) {
    3239           0 :       PrintF("[");
    3240           0 :       heap_state.Print();
    3241           0 :       PrintF("]");
    3242             :     }
    3243           0 :     PrintF("\n");
    3244             :   }
    3245         479 : }
    3246             : 
    3247             : 
    3248    27219566 : double Heap::MonotonicallyIncreasingTimeInMs() {
    3249    27219566 :   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
    3250    27217042 :          static_cast<double>(base::Time::kMillisecondsPerSecond);
    3251             : }
    3252             : 
    3253             : 
    3254           0 : bool Heap::IdleNotification(int idle_time_in_ms) {
    3255             :   return IdleNotification(
    3256           0 :       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
    3257           0 :       (static_cast<double>(idle_time_in_ms) /
    3258           0 :        static_cast<double>(base::Time::kMillisecondsPerSecond)));
    3259             : }
    3260             : 
    3261             : 
    3262         479 : bool Heap::IdleNotification(double deadline_in_seconds) {
    3263         479 :   CHECK(HasBeenSetUp());
    3264             :   double deadline_in_ms =
    3265             :       deadline_in_seconds *
    3266         479 :       static_cast<double>(base::Time::kMillisecondsPerSecond);
    3267             :   HistogramTimerScope idle_notification_scope(
    3268         958 :       isolate_->counters()->gc_idle_notification());
    3269        1437 :   TRACE_EVENT0("v8", "V8.GCIdleNotification");
    3270         479 :   double start_ms = MonotonicallyIncreasingTimeInMs();
    3271         479 :   double idle_time_in_ms = deadline_in_ms - start_ms;
    3272             : 
    3273             :   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
    3274         479 :                              OldGenerationAllocationCounter());
    3275             : 
    3276         479 :   GCIdleTimeHeapState heap_state = ComputeHeapState();
    3277             : 
    3278             :   GCIdleTimeAction action =
    3279         479 :       gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
    3280             : 
    3281         479 :   bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
    3282             : 
    3283         479 :   IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
    3284         479 :   return result;
    3285             : }
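                     : 
                     : // Worked example (hypothetical numbers): an embedder that calls
                     : // IdleNotification(now_in_seconds + 0.016) grants roughly a 16 ms idle
                     : // budget; deadline_in_ms - start_ms above becomes the idle_time_in_ms
                     : // passed to gc_idle_time_handler_->Compute().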
    3286             : 
    3287             : 
    3288           0 : bool Heap::RecentIdleNotificationHappened() {
    3289           0 :   return (last_idle_notification_time_ +
    3290             :           GCIdleTimeHandler::kMaxScheduledIdleTime) >
    3291           0 :          MonotonicallyIncreasingTimeInMs();
    3292             : }
    3293             : 
    3294             : class MemoryPressureInterruptTask : public CancelableTask {
    3295             :  public:
    3296             :   explicit MemoryPressureInterruptTask(Heap* heap)
    3297          11 :       : CancelableTask(heap->isolate()), heap_(heap) {}
    3298             : 
    3299          22 :   ~MemoryPressureInterruptTask() override = default;
    3300             : 
    3301             :  private:
    3302             :   // v8::internal::CancelableTask overrides.
    3303          11 :   void RunInternal() override { heap_->CheckMemoryPressure(); }
    3304             : 
    3305             :   Heap* heap_;
    3306             :   DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
    3307             : };
    3308             : 
    3309     1731076 : void Heap::CheckMemoryPressure() {
    3310     1731076 :   if (HighMemoryPressure()) {
    3311             :     // The optimizing compiler may be unnecessarily holding on to memory.
    3312        7625 :     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
    3313             :   }
    3314             :   MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
    3315             :   // Reset the memory pressure level to avoid recursive GCs triggered by
    3316             :   // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
    3317             :   // the finalizers.
    3318             :   memory_pressure_level_ = MemoryPressureLevel::kNone;
    3319     1731076 :   if (memory_pressure_level == MemoryPressureLevel::kCritical) {
    3320        7625 :     CollectGarbageOnMemoryPressure();
    3321     1723451 :   } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
    3322           0 :     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
    3323             :       StartIncrementalMarking(kReduceMemoryFootprintMask,
    3324             :                               GarbageCollectionReason::kMemoryPressure);
    3325             :     }
    3326             :   }
    3327     1731076 :   if (memory_reducer_) {
    3328             :     MemoryReducer::Event event;
    3329     1731076 :     event.type = MemoryReducer::kPossibleGarbage;
    3330     1731076 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    3331     1731076 :     memory_reducer_->NotifyPossibleGarbage(event);
    3332             :   }
    3333     1731076 : }
    3334             : 
    3335        7625 : void Heap::CollectGarbageOnMemoryPressure() {
    3336             :   const int kGarbageThresholdInBytes = 8 * MB;
    3337             :   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
    3338             :   // This constant is the maximum response time in the RAIL performance model.
    3339             :   const double kMaxMemoryPressurePauseMs = 100;
    3340             : 
    3341        7625 :   double start = MonotonicallyIncreasingTimeInMs();
    3342             :   CollectAllGarbage(kReduceMemoryFootprintMask,
    3343             :                     GarbageCollectionReason::kMemoryPressure,
    3344             :                     kGCCallbackFlagCollectAllAvailableGarbage);
    3345        7625 :   EagerlyFreeExternalMemory();
    3346        7625 :   double end = MonotonicallyIncreasingTimeInMs();
    3347             : 
    3348             :   // Estimate how much memory we can free.
    3349       22875 :   int64_t potential_garbage = (CommittedMemory() - SizeOfObjects()) +
    3350        7625 :                               isolate()->isolate_data()->external_memory_;
    3351             :   // If we can potentially free a large amount of memory, then start a GC right
    3352             :   // away instead of waiting for the memory reducer.
    3353       10393 :   if (potential_garbage >= kGarbageThresholdInBytes &&
    3354        2768 :       potential_garbage >=
    3355        2768 :           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
    3356             :     // If we spent less than half of the time budget, then perform a full GC.
    3357             :     // Otherwise, start incremental marking.
    3358        2768 :     if (end - start < kMaxMemoryPressurePauseMs / 2) {
    3359             :       CollectAllGarbage(kReduceMemoryFootprintMask,
    3360             :                         GarbageCollectionReason::kMemoryPressure,
    3361             :                         kGCCallbackFlagCollectAllAvailableGarbage);
    3362             :     } else {
    3363           0 :       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
    3364             :         StartIncrementalMarking(kReduceMemoryFootprintMask,
    3365             :                                 GarbageCollectionReason::kMemoryPressure);
    3366             :       }
    3367             :     }
    3368             :   }
    3369        7625 : }
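                     : 
                     : // Worked example (hypothetical numbers): with 200 MB committed, 150 MB of
                     : // live objects, and no external memory, potential_garbage is 50 MB, which
                     : // exceeds both 8 MB and 10% of committed (20 MB). A second full GC is then
                     : // performed if the first one took under 50 ms; otherwise incremental
                     : // marking is started instead.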
    3370             : 
    3371        7635 : void Heap::MemoryPressureNotification(MemoryPressureLevel level,
    3372             :                                       bool is_isolate_locked) {
    3373             :   MemoryPressureLevel previous = memory_pressure_level_;
    3374             :   memory_pressure_level_ = level;
    3375       15270 :   if ((previous != MemoryPressureLevel::kCritical &&
    3376        7645 :        level == MemoryPressureLevel::kCritical) ||
    3377          20 :       (previous == MemoryPressureLevel::kNone &&
    3378          10 :        level == MemoryPressureLevel::kModerate)) {
    3379        7630 :     if (is_isolate_locked) {
    3380        7619 :       CheckMemoryPressure();
    3381             :     } else {
    3382             :       ExecutionAccess access(isolate());
    3383          11 :       isolate()->stack_guard()->RequestGC();
    3384          11 :       auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
    3385          11 :           reinterpret_cast<v8::Isolate*>(isolate()));
    3386          11 :       taskrunner->PostTask(
    3387          44 :           base::make_unique<MemoryPressureInterruptTask>(this));
    3388             :     }
    3389             :   }
    3390        7635 : }
    3391             : 
    3392        8883 : void Heap::EagerlyFreeExternalMemory() {
    3393       35922 :   for (Page* page : *old_space()) {
    3394       27039 :     if (!page->SweepingDone()) {
    3395             :       base::MutexGuard guard(page->mutex());
    3396        4045 :       if (!page->SweepingDone()) {
    3397             :         ArrayBufferTracker::FreeDead(
    3398        1232 :             page, mark_compact_collector()->non_atomic_marking_state());
    3399             :       }
    3400             :     }
    3401             :   }
    3402        8883 :   memory_allocator()->unmapper()->EnsureUnmappingCompleted();
    3403        8883 : }
    3404             : 
    3405        3406 : void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
    3406             :                                     void* data) {
    3407             :   const size_t kMaxCallbacks = 100;
    3408        6812 :   CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
    3409        3406 :   for (auto callback_data : near_heap_limit_callbacks_) {
    3410           0 :     CHECK_NE(callback_data.first, callback);
    3411             :   }
    3412        6812 :   near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
    3413        3406 : }
    3414             : 
    3415        3398 : void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
    3416             :                                        size_t heap_limit) {
    3417        6796 :   for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
    3418        6796 :     if (near_heap_limit_callbacks_[i].first == callback) {
    3419        3398 :       near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
    3420        3398 :       if (heap_limit) {
    3421           5 :         RestoreHeapLimit(heap_limit);
    3422             :       }
    3423        3398 :       return;
    3424             :     }
    3425             :   }
    3426           0 :   UNREACHABLE();
    3427             : }
    3428             : 
    3429           4 : void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
    3430             :   initial_max_old_generation_size_threshold_ =
    3431           4 :       initial_max_old_generation_size_ * threshold_percent;
    3432           4 : }
    3433             : 
    3434          83 : bool Heap::InvokeNearHeapLimitCallback() {
    3435         166 :   if (near_heap_limit_callbacks_.size() > 0) {
    3436             :     HandleScope scope(isolate());
    3437             :     v8::NearHeapLimitCallback callback =
    3438          22 :         near_heap_limit_callbacks_.back().first;
    3439          22 :     void* data = near_heap_limit_callbacks_.back().second;
    3440             :     size_t heap_limit = callback(data, max_old_generation_size_,
    3441          22 :                                  initial_max_old_generation_size_);
    3442          22 :     if (heap_limit > max_old_generation_size_) {
    3443          22 :       max_old_generation_size_ = heap_limit;
    3444             :       return true;
    3445             :     }
    3446             :   }
    3447             :   return false;
    3448             : }
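                     : 
                     : // Illustrative embedder-side sketch (not part of heap.cc; assumes the public
                     : // v8::Isolate::AddNearHeapLimitCallback API): a callback that grants 25% more
                     : // old-generation space when the limit is approached could look like
                     : //
                     : //   size_t OnNearHeapLimit(void* data, size_t current_limit,
                     : //                          size_t initial_limit) {
                     : //     return current_limit + current_limit / 4;
                     : //   }
                     : //   ...
                     : //   isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);
                     : //
                     : // Returning a value larger than current_limit raises
                     : // max_old_generation_size_ via InvokeNearHeapLimitCallback() above.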
    3449             : 
    3450           0 : void Heap::CollectCodeStatistics() {
    3451           0 :   TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
    3452           0 :   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
    3453             :   // We do not look for code in new space or map space. If code somehow ends
    3454             :   // up in those spaces, we would miss it here.
    3455           0 :   CodeStatistics::CollectCodeStatistics(code_space_, isolate());
    3456           0 :   CodeStatistics::CollectCodeStatistics(old_space_, isolate());
    3457           0 :   CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
    3458           0 : }
    3459             : 
    3460             : #ifdef DEBUG
    3461             : 
    3462             : void Heap::Print() {
    3463             :   if (!HasBeenSetUp()) return;
    3464             :   isolate()->PrintStack(stdout);
    3465             : 
    3466             :   for (SpaceIterator it(this); it.has_next();) {
    3467             :     it.next()->Print();
    3468             :   }
    3469             : }
    3470             : 
    3471             : 
    3472             : void Heap::ReportCodeStatistics(const char* title) {
    3473             :   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
    3474             :   CollectCodeStatistics();
    3475             :   CodeStatistics::ReportCodeStatistics(isolate());
    3476             : }
    3477             : 
    3478             : #endif  // DEBUG
    3479             : 
    3480       97985 : const char* Heap::GarbageCollectionReasonToString(
    3481             :     GarbageCollectionReason gc_reason) {
    3482       97985 :   switch (gc_reason) {
    3483             :     case GarbageCollectionReason::kAllocationFailure:
    3484             :       return "allocation failure";
    3485             :     case GarbageCollectionReason::kAllocationLimit:
    3486           0 :       return "allocation limit";
    3487             :     case GarbageCollectionReason::kContextDisposal:
    3488         184 :       return "context disposal";
    3489             :     case GarbageCollectionReason::kCountersExtension:
    3490           0 :       return "counters extension";
    3491             :     case GarbageCollectionReason::kDebugger:
    3492       14350 :       return "debugger";
    3493             :     case GarbageCollectionReason::kDeserializer:
    3494           1 :       return "deserialize";
    3495             :     case GarbageCollectionReason::kExternalMemoryPressure:
    3496         916 :       return "external memory pressure";
    3497             :     case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
    3498        6230 :       return "finalize incremental marking via stack guard";
    3499             :     case GarbageCollectionReason::kFinalizeMarkingViaTask:
    3500       12500 :       return "finalize incremental marking via task";
    3501             :     case GarbageCollectionReason::kFullHashtable:
    3502           0 :       return "full hash-table";
    3503             :     case GarbageCollectionReason::kHeapProfiler:
    3504        1141 :       return "heap profiler";
    3505             :     case GarbageCollectionReason::kIdleTask:
    3506        1988 :       return "idle task";
    3507             :     case GarbageCollectionReason::kLastResort:
    3508          44 :       return "last resort";
    3509             :     case GarbageCollectionReason::kLowMemoryNotification:
    3510        1041 :       return "low memory notification";
    3511             :     case GarbageCollectionReason::kMakeHeapIterable:
    3512           0 :       return "make heap iterable";
    3513             :     case GarbageCollectionReason::kMemoryPressure:
    3514       10393 :       return "memory pressure";
    3515             :     case GarbageCollectionReason::kMemoryReducer:
    3516           0 :       return "memory reducer";
    3517             :     case GarbageCollectionReason::kRuntime:
    3518         325 :       return "runtime";
    3519             :     case GarbageCollectionReason::kSamplingProfiler:
    3520          20 :       return "sampling profiler";
    3521             :     case GarbageCollectionReason::kSnapshotCreator:
    3522         382 :       return "snapshot creator";
    3523             :     case GarbageCollectionReason::kTesting:
    3524       28517 :       return "testing";
    3525             :     case GarbageCollectionReason::kExternalFinalize:
    3526           5 :       return "external finalize";
    3527             :     case GarbageCollectionReason::kUnknown:
    3528           5 :       return "unknown";
    3529             :   }
    3530           0 :   UNREACHABLE();
    3531             : }
    3532             : 
    3533     1936839 : bool Heap::Contains(HeapObject value) {
    3534     1936839 :   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    3535             :     return false;
    3536             :   }
    3537     3873678 :   return HasBeenSetUp() &&
    3538     1922538 :          (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
    3539           0 :           code_space_->Contains(value) || map_space_->Contains(value) ||
    3540           0 :           lo_space_->Contains(value) || read_only_space_->Contains(value) ||
    3541           0 :           code_lo_space_->Contains(value) || new_lo_space_->Contains(value));
    3542             : }
    3543             : 
    3544          70 : bool Heap::InSpace(HeapObject value, AllocationSpace space) {
    3545          70 :   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
    3546             :     return false;
    3547             :   }
    3548          70 :   if (!HasBeenSetUp()) return false;
    3549             : 
    3550          70 :   switch (space) {
    3551             :     case NEW_SPACE:
    3552          15 :       return new_space_->ToSpaceContains(value);
    3553             :     case OLD_SPACE:
    3554          15 :       return old_space_->Contains(value);
    3555             :     case CODE_SPACE:
    3556           0 :       return code_space_->Contains(value);
    3557             :     case MAP_SPACE:
    3558           0 :       return map_space_->Contains(value);
    3559             :     case LO_SPACE:
    3560          30 :       return lo_space_->Contains(value);
    3561             :     case CODE_LO_SPACE:
    3562          10 :       return code_lo_space_->Contains(value);
    3563             :     case NEW_LO_SPACE:
    3564           0 :       return new_lo_space_->Contains(value);
    3565             :     case RO_SPACE:
    3566           0 :       return read_only_space_->Contains(value);
    3567             :   }
    3568           0 :   UNREACHABLE();
    3569             : }
    3570             : 
    3571           0 : bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
    3572           0 :   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
    3573             :     return false;
    3574             :   }
    3575           0 :   if (!HasBeenSetUp()) return false;
    3576             : 
    3577           0 :   switch (space) {
    3578             :     case NEW_SPACE:
    3579           0 :       return new_space_->ToSpaceContainsSlow(addr);
    3580             :     case OLD_SPACE:
    3581           0 :       return old_space_->ContainsSlow(addr);
    3582             :     case CODE_SPACE:
    3583           0 :       return code_space_->ContainsSlow(addr);
    3584             :     case MAP_SPACE:
    3585           0 :       return map_space_->ContainsSlow(addr);
    3586             :     case LO_SPACE:
    3587           0 :       return lo_space_->ContainsSlow(addr);
    3588             :     case CODE_LO_SPACE:
    3589           0 :       return code_lo_space_->ContainsSlow(addr);
    3590             :     case NEW_LO_SPACE:
    3591           0 :       return new_lo_space_->ContainsSlow(addr);
    3592             :     case RO_SPACE:
    3593           0 :       return read_only_space_->ContainsSlow(addr);
    3594             :   }
    3595           0 :   UNREACHABLE();
    3596             : }
    3597             : 
    3598          40 : bool Heap::IsValidAllocationSpace(AllocationSpace space) {
    3599          40 :   switch (space) {
    3600             :     case NEW_SPACE:
    3601             :     case OLD_SPACE:
    3602             :     case CODE_SPACE:
    3603             :     case MAP_SPACE:
    3604             :     case LO_SPACE:
    3605             :     case NEW_LO_SPACE:
    3606             :     case CODE_LO_SPACE:
    3607             :     case RO_SPACE:
    3608             :       return true;
    3609             :     default:
    3610           0 :       return false;
    3611             :   }
    3612             : }
    3613             : 
    3614             : #ifdef VERIFY_HEAP
    3615             : class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
    3616             :  public:
    3617             :   explicit VerifyReadOnlyPointersVisitor(Heap* heap)
    3618             :       : VerifyPointersVisitor(heap) {}
    3619             : 
    3620             :  protected:
    3621             :   void VerifyPointers(HeapObject host, MaybeObjectSlot start,
    3622             :                       MaybeObjectSlot end) override {
    3623             :     if (!host.is_null()) {
    3624             :       CHECK(heap_->InReadOnlySpace(host->map()));
    3625             :     }
    3626             :     VerifyPointersVisitor::VerifyPointers(host, start, end);
    3627             : 
    3628             :     for (MaybeObjectSlot current = start; current < end; ++current) {
    3629             :       HeapObject heap_object;
    3630             :       if ((*current)->GetHeapObject(&heap_object)) {
    3631             :         CHECK(heap_->InReadOnlySpace(heap_object));
    3632             :       }
    3633             :     }
    3634             :   }
    3635             : };
    3636             : 
    3637             : void Heap::Verify() {
    3638             :   CHECK(HasBeenSetUp());
    3639             :   HandleScope scope(isolate());
    3640             : 
    3641             :   // We have to wait here for the sweeper threads to have an iterable heap.
    3642             :   mark_compact_collector()->EnsureSweepingCompleted();
    3643             : 
    3644             :   VerifyPointersVisitor visitor(this);
    3645             :   IterateRoots(&visitor, VISIT_ONLY_STRONG);
    3646             : 
    3647             :   if (!isolate()->context().is_null() &&
    3648             :       !isolate()->normalized_map_cache()->IsUndefined(isolate())) {
    3649             :     NormalizedMapCache::cast(*isolate()->normalized_map_cache())
    3650             :         ->NormalizedMapCacheVerify(isolate());
    3651             :   }
    3652             : 
    3653             :   VerifySmisVisitor smis_visitor;
    3654             :   IterateSmiRoots(&smis_visitor);
    3655             : 
    3656             :   new_space_->Verify(isolate());
    3657             : 
    3658             :   old_space_->Verify(isolate(), &visitor);
    3659             :   map_space_->Verify(isolate(), &visitor);
    3660             : 
    3661             :   VerifyPointersVisitor no_dirty_regions_visitor(this);
    3662             :   code_space_->Verify(isolate(), &no_dirty_regions_visitor);
    3663             : 
    3664             :   lo_space_->Verify(isolate());
    3665             :   code_lo_space_->Verify(isolate());
    3666             :   new_lo_space_->Verify(isolate());
    3667             : 
    3668             :   VerifyReadOnlyPointersVisitor read_only_visitor(this);
    3669             :   read_only_space_->Verify(isolate(), &read_only_visitor);
    3670             : }
    3671             : 
    3672             : class SlotVerifyingVisitor : public ObjectVisitor {
    3673             :  public:
    3674             :   SlotVerifyingVisitor(std::set<Address>* untyped,
    3675             :                        std::set<std::pair<SlotType, Address> >* typed)
    3676             :       : untyped_(untyped), typed_(typed) {}
    3677             : 
    3678             :   virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
    3679             : 
    3680             :   void VisitPointers(HeapObject host, ObjectSlot start,
    3681             :                      ObjectSlot end) override {
    3682             : #ifdef DEBUG
    3683             :     for (ObjectSlot slot = start; slot < end; ++slot) {
    3684             :       DCHECK(!HasWeakHeapObjectTag(*slot));
    3685             :     }
    3686             : #endif  // DEBUG
    3687             :     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    3688             :   }
    3689             : 
    3690             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3691             :                      MaybeObjectSlot end) final {
    3692             :     for (MaybeObjectSlot slot = start; slot < end; ++slot) {
    3693             :       if (ShouldHaveBeenRecorded(host, *slot)) {
    3694             :         CHECK_GT(untyped_->count(slot.address()), 0);
    3695             :       }
    3696             :     }
    3697             :   }
    3698             : 
    3699             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    3700             :     Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    3701             :     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
    3702             :       CHECK(
    3703             :           InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
    3704             :           (rinfo->IsInConstantPool() &&
    3705             :            InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
    3706             :     }
    3707             :   }
    3708             : 
    3709             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3710             :     Object target = rinfo->target_object();
    3711             :     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
    3712             :       CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
    3713             :             (rinfo->IsInConstantPool() &&
    3714             :              InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
    3715             :     }
    3716             :   }
    3717             : 
    3718             :  private:
    3719             :   bool InTypedSet(SlotType type, Address slot) {
    3720             :     return typed_->count(std::make_pair(type, slot)) > 0;
    3721             :   }
    3722             :   std::set<Address>* untyped_;
    3723             :   std::set<std::pair<SlotType, Address> >* typed_;
    3724             : };
    3725             : 
    3726             : class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
    3727             :  public:
    3728             :   OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
    3729             :                                std::set<std::pair<SlotType, Address>>* typed)
    3730             :       : SlotVerifyingVisitor(untyped, typed) {}
    3731             : 
    3732             :   bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
    3733             :     DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InYoungGeneration(target),
    3734             :                    Heap::InToPage(target));
    3735             :     return target->IsStrongOrWeak() && Heap::InYoungGeneration(target) &&
    3736             :            !Heap::InYoungGeneration(host);
    3737             :   }
    3738             : };
    3739             : 
    3740             : template <RememberedSetType direction>
    3741             : void CollectSlots(MemoryChunk* chunk, Address start, Address end,
    3742             :                   std::set<Address>* untyped,
    3743             :                   std::set<std::pair<SlotType, Address> >* typed) {
    3744             :   RememberedSet<direction>::Iterate(
    3745             :       chunk,
    3746             :       [start, end, untyped](MaybeObjectSlot slot) {
    3747             :         if (start <= slot.address() && slot.address() < end) {
    3748             :           untyped->insert(slot.address());
    3749             :         }
    3750             :         return KEEP_SLOT;
    3751             :       },
    3752             :       SlotSet::PREFREE_EMPTY_BUCKETS);
    3753             :   RememberedSet<direction>::IterateTyped(
    3754             :       chunk, [=](SlotType type, Address slot) {
    3755             :         if (start <= slot && slot < end) {
    3756             :           typed->insert(std::make_pair(type, slot));
    3757             :         }
    3758             :         return KEEP_SLOT;
    3759             :       });
    3760             : }
    3761             : 
    3762             : void Heap::VerifyRememberedSetFor(HeapObject object) {
    3763             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    3764             :   DCHECK_IMPLIES(chunk->mutex() == nullptr, InReadOnlySpace(object));
    3765             :   // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
    3766             :   base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
    3767             :       chunk->mutex());
    3768             :   Address start = object->address();
    3769             :   Address end = start + object->Size();
    3770             :   std::set<Address> old_to_new;
    3771             :   std::set<std::pair<SlotType, Address> > typed_old_to_new;
    3772             :   if (!InYoungGeneration(object)) {
    3773             :     store_buffer()->MoveAllEntriesToRememberedSet();
    3774             :     CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
    3775             :     OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new);
    3776             :     object->IterateBody(&visitor);
    3777             :   }
    3778             :   // TODO(ulan): Add old to old slot set verification once all weak objects
    3779             :   // have their own instance types and slots are recorded for all weal fields.
    3780             :   // have their own instance types and slots are recorded for all weak fields.
    3781             : #endif
    3782             : 
    3783             : #ifdef DEBUG
    3784             : void Heap::VerifyCountersAfterSweeping() {
    3785             :   PagedSpaces spaces(this);
    3786             :   for (PagedSpace* space = spaces.next(); space != nullptr;
    3787             :        space = spaces.next()) {
    3788             :     space->VerifyCountersAfterSweeping();
    3789             :   }
    3790             : }
    3791             : 
    3792             : void Heap::VerifyCountersBeforeConcurrentSweeping() {
    3793             :   PagedSpaces spaces(this);
    3794             :   for (PagedSpace* space = spaces.next(); space != nullptr;
    3795             :        space = spaces.next()) {
    3796             :     space->VerifyCountersBeforeConcurrentSweeping();
    3797             :   }
    3798             : }
    3799             : #endif
    3800             : 
    3801           0 : void Heap::ZapFromSpace() {
    3802           0 :   if (!new_space_->IsFromSpaceCommitted()) return;
    3803           0 :   for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
    3804             :     memory_allocator()->ZapBlock(page->area_start(),
    3805           0 :                                  page->HighWaterMark() - page->area_start(),
    3806           0 :                                  ZapValue());
    3807             :   }
    3808             : }
    3809             : 
    3810     1893278 : void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
    3811             : #ifdef DEBUG
    3812             :   DCHECK(IsAligned(start_address, kIntSize));
    3813             :   for (int i = 0; i < size_in_bytes / kIntSize; i++) {
    3814             :     Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
    3815             :   }
    3816             : #endif
    3817     1893278 : }
    3818             : 
    3819             : // TODO(ishell): move builtin accessors out from Heap.
    3820   143078669 : Code Heap::builtin(int index) {
    3821             :   DCHECK(Builtins::IsBuiltinId(index));
    3822   286158432 :   return Code::cast(Object(isolate()->builtins_table()[index]));
    3823             : }
    3824             : 
    3825    86017549 : Address Heap::builtin_address(int index) {
    3826             :   DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
    3827   473314086 :   return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
    3828             : }
    3829             : 
    3830      252840 : void Heap::set_builtin(int index, Code builtin) {
    3831             :   DCHECK(Builtins::IsBuiltinId(index));
    3832             :   DCHECK(Internals::HasHeapObjectTag(builtin.ptr()));
    3833             :   // The given builtin may be completely uninitialized thus we cannot check its
    3834             :   // type here.
    3835      505680 :   isolate()->builtins_table()[index] = builtin.ptr();
    3836      252840 : }
    3837             : 
    3838       99688 : void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
    3839       99688 :   IterateStrongRoots(v, mode);
    3840       99688 :   IterateWeakRoots(v, mode);
    3841       99688 : }
    3842             : 
    3843      160937 : void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
    3844      321874 :   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
    3845      321874 :                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
    3846             :                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
    3847             :   v->VisitRootPointer(Root::kStringTable, nullptr,
    3848      482811 :                       FullObjectSlot(&roots_table()[RootIndex::kStringTable]));
    3849      160937 :   v->Synchronize(VisitorSynchronization::kStringTable);
    3850      160937 :   if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
    3851             :       mode != VISIT_FOR_SERIALIZATION) {
    3852             :     // Scavenge collections have special processing for this.
    3853             :     // Do not visit for serialization, since the external string table will
    3854             :     // be populated from scratch upon deserialization.
    3855        1688 :     external_string_table_.IterateAll(v);
    3856             :   }
    3857      160937 :   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
    3858      160937 : }
    3859             : 
    3860       61249 : void Heap::IterateSmiRoots(RootVisitor* v) {
    3861             :   // Acquire execution access since we are going to read stack limit values.
    3862             :   ExecutionAccess access(isolate());
    3863             :   v->VisitRootPointers(Root::kSmiRootList, nullptr,
    3864             :                        roots_table().smi_roots_begin(),
    3865      122498 :                        roots_table().smi_roots_end());
    3866       61249 :   v->Synchronize(VisitorSynchronization::kSmiRootList);
    3867       61249 : }
    3868             : 
    3869             : // We cannot avoid stale handles to left-trimmed objects, but we can at least
    3870             : // make sure that all handles still needed are updated. Filter out stale
    3871             : // pointers and clear the slots to allow post-processing of handles (needed
    3872             : // because the sweeper might actually free the underlying page).
    3873           0 : class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
    3874             :  public:
    3875      280837 :   explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
    3876             :     USE(heap_);
    3877             :   }
    3878             : 
    3879           0 :   void VisitRootPointer(Root root, const char* description,
    3880             :                         FullObjectSlot p) override {
    3881           0 :     FixHandle(p);
    3882           0 :   }
    3883             : 
    3884      649220 :   void VisitRootPointers(Root root, const char* description,
    3885             :                          FullObjectSlot start, FullObjectSlot end) override {
    3886    88573311 :     for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
    3887      649221 :   }
    3888             : 
    3889             :  private:
    3890    87274870 :   inline void FixHandle(FullObjectSlot p) {
    3891   184626735 :     if (!(*p)->IsHeapObject()) return;
    3892    77197876 :     HeapObject current = HeapObject::cast(*p);
    3893             :     const MapWord map_word = current->map_word();
    3894   152976711 :     if (!map_word.IsForwardingAddress() && current->IsFiller()) {
    3895             : #ifdef DEBUG
    3896             :       // We need to find a FixedArrayBase map after walking the fillers.
    3897             :       while (current->IsFiller()) {
    3898             :         Address next = current->ptr();
    3899             :         if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
    3900             :           next += kTaggedSize;
    3901             :         } else if (current->map() ==
    3902             :                    ReadOnlyRoots(heap_).two_pointer_filler_map()) {
    3903             :           next += 2 * kTaggedSize;
    3904             :         } else {
    3905             :           next += current->Size();
    3906             :         }
    3907             :         current = HeapObject::cast(Object(next));
    3908             :       }
    3909             :       DCHECK(current->IsFixedArrayBase());
    3910             : #endif  // DEBUG
    3911             :       p.store(Smi::kZero);
    3912             :     }
    3913             :   }
    3914             : 
    3915             :   Heap* heap_;
    3916             : };
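// Editor's sketch (not part of heap.cc; illustrative only): the visitor above
// scans every handle slot and clears those that point into filler memory left
// behind by left-trimming, so that later handle post-processing never touches
// a stale object. A self-contained model of that scan-and-clear pass; Slot,
// IsFiller and kClearedValue are invented for this sketch.
#include <cstdint>
#include <vector>

using Slot = uintptr_t;
constexpr Slot kClearedValue = 0;  // stands in for Smi::kZero.

// Hypothetical predicate: does this slot point at a filler object? A toy
// encoding (odd values are "filler") keeps the sketch self-contained.
bool IsFiller(Slot s) { return (s & 1) != 0; }

void FixStaleSlots(std::vector<Slot>& slots) {
  for (Slot& s : slots) {
    if (s == kClearedValue) continue;    // nothing to fix in an empty slot.
    if (IsFiller(s)) s = kClearedValue;  // clear the stale pointer in place.
  }
}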
    3917             : 
    3918      280837 : void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
    3919      561674 :   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
    3920      561674 :                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
    3921             :                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
    3922             :   v->VisitRootPointers(Root::kStrongRootList, nullptr,
    3923             :                        roots_table().strong_roots_begin(),
    3924      561674 :                        roots_table().strong_roots_end());
    3925      280837 :   v->Synchronize(VisitorSynchronization::kStrongRootList);
    3926             : 
    3927     2601133 :   isolate_->bootstrapper()->Iterate(v);
    3928      280837 :   v->Synchronize(VisitorSynchronization::kBootstrapper);
    3929      280837 :   isolate_->Iterate(v);
    3930      280837 :   v->Synchronize(VisitorSynchronization::kTop);
    3931      280837 :   Relocatable::Iterate(isolate_, v);
    3932      280837 :   v->Synchronize(VisitorSynchronization::kRelocatable);
    3933      561674 :   isolate_->debug()->Iterate(v);
    3934      280837 :   v->Synchronize(VisitorSynchronization::kDebug);
    3935             : 
    3936      561674 :   isolate_->compilation_cache()->Iterate(v);
    3937      280837 :   v->Synchronize(VisitorSynchronization::kCompilationCache);
    3938             : 
    3939             :   // Iterate over local handles in handle scopes.
    3940             :   FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
    3941      561674 :   isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
    3942      561674 :   isolate_->handle_scope_implementer()->Iterate(v);
    3943      280837 :   isolate_->IterateDeferredHandles(v);
    3944      280837 :   v->Synchronize(VisitorSynchronization::kHandleScope);
    3945             : 
    3946             :   // Iterate over the builtin code objects and code stubs in the
    3947             :   // heap. Note that it is not necessary to iterate over code objects
    3948             :   // on scavenge collections.
    3949      280837 :   if (!isMinorGC) {
    3950      257347 :     IterateBuiltins(v);
    3951      257347 :     v->Synchronize(VisitorSynchronization::kBuiltins);
    3952             : 
    3953             :     // The dispatch table is set up directly from the builtins using
    3954             :     // InitializeDispatchTable so there is no need to iterate to create it.
    3955      257347 :     if (mode != VISIT_FOR_SERIALIZATION) {
    3956             :       // Currently we iterate the dispatch table to update pointers to possibly
    3957             :       // moved Code objects for bytecode handlers.
    3958             :       // TODO(v8:6666): Remove iteration once builtins are embedded (and thus
    3959             :       // immovable) in every build configuration.
    3960      392196 :       isolate_->interpreter()->IterateDispatchTable(v);
    3961      196098 :       v->Synchronize(VisitorSynchronization::kDispatchTable);
    3962             :     }
    3963             :   }
    3964             : 
    3965             :   // Iterate over global handles.
    3966      280837 :   switch (mode) {
    3967             :     case VISIT_FOR_SERIALIZATION:
    3968             :       // Global handles are not iterated by the serializer. Values referenced by
    3969             :       // global handles need to be added manually.
    3970             :       break;
    3971             :     case VISIT_ONLY_STRONG:
    3972      240596 :       isolate_->global_handles()->IterateStrongRoots(v);
    3973      120298 :       break;
    3974             :     case VISIT_ALL_IN_SCAVENGE:
    3975             :     case VISIT_ALL_IN_MINOR_MC_MARK:
    3976       46980 :       isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
    3977       23490 :       break;
    3978             :     case VISIT_ALL_IN_MINOR_MC_UPDATE:
    3979           0 :       isolate_->global_handles()->IterateAllYoungRoots(v);
    3980           0 :       break;
    3981             :     case VISIT_ALL_IN_SWEEP_NEWSPACE:
    3982             :     case VISIT_ALL:
    3983      151600 :       isolate_->global_handles()->IterateAllRoots(v);
    3984       75800 :       break;
    3985             :   }
    3986      280837 :   v->Synchronize(VisitorSynchronization::kGlobalHandles);
    3987             : 
    3988             :   // Iterate over eternal handles. Eternal handles are not iterated by the
    3989             :   // serializer. Values referenced by eternal handles need to be added manually.
    3990      280837 :   if (mode != VISIT_FOR_SERIALIZATION) {
    3991      219588 :     if (isMinorGC) {
    3992       46980 :       isolate_->eternal_handles()->IterateYoungRoots(v);
    3993             :     } else {
    3994      392196 :       isolate_->eternal_handles()->IterateAllRoots(v);
    3995             :     }
    3996             :   }
    3997      280837 :   v->Synchronize(VisitorSynchronization::kEternalHandles);
    3998             : 
    3999             :   // Iterate over pointers being held by inactive threads.
    4000      561674 :   isolate_->thread_manager()->Iterate(v);
    4001      280837 :   v->Synchronize(VisitorSynchronization::kThreadManager);
    4002             : 
    4003             :   // Iterate over other strong roots (currently only identity maps).
    4004      561959 :   for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
    4005      281122 :     v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
    4006             :   }
    4007      280837 :   v->Synchronize(VisitorSynchronization::kStrongRoots);
    4008             : 
    4009             :   // Iterate over pending Microtasks stored in MicrotaskQueues.
    4010      280837 :   MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
    4011      280837 :   if (default_microtask_queue) {
    4012      280837 :     MicrotaskQueue* microtask_queue = default_microtask_queue;
    4013      280837 :     do {
    4014      280837 :       microtask_queue->IterateMicrotasks(v);
    4015             :       microtask_queue = microtask_queue->next();
    4016             :     } while (microtask_queue != default_microtask_queue);
    4017             :   }
    4018             : 
    4019             :   // Iterate over the partial snapshot cache unless serializing or
    4020             :   // deserializing.
    4021      280837 :   if (mode != VISIT_FOR_SERIALIZATION) {
    4022      219588 :     SerializerDeserializer::Iterate(isolate_, v);
    4023      219588 :     v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
    4024             :   }
    4025      280837 : }
    4026             : 
    4027         398 : void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
    4028         398 :   isolate_->global_handles()->IterateWeakRoots(v);
    4029         398 : }
    4030             : 
    4031      257399 : void Heap::IterateBuiltins(RootVisitor* v) {
    4032   387554100 :   for (int i = 0; i < Builtins::builtin_count; i++) {
    4033             :     v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
    4034   774593290 :                         FullObjectSlot(builtin_address(i)));
    4035             :   }
    4036             : #ifdef V8_EMBEDDED_BUILTINS
    4037             :   // The entry table does not need to be updated if all builtins are embedded.
    4038             :   STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
    4039             : #else
    4040             :   // If builtins are not embedded, they may move and thus the entry table must
    4041             :   // be updated.
    4042             :   // TODO(v8:6666): Remove once builtins are embedded unconditionally.
    4043             :   Builtins::UpdateBuiltinEntryTable(isolate());
    4044             : #endif  // V8_EMBEDDED_BUILTINS
    4045      257347 : }
    4046             : 
    4047             : // TODO(1236194): Since the heap size is configurable on the command line
    4048             : // and through the API, we should gracefully handle the case that the heap
    4049             : // size is not big enough to fit all the initial objects.
    4050       61048 : void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
    4051             :                          size_t max_old_generation_size_in_mb,
    4052             :                          size_t code_range_size_in_mb) {
    4053             :   // Overwrite default configuration.
    4054       61048 :   if (max_semi_space_size_in_kb != 0) {
    4055             :     max_semi_space_size_ =
    4056       59114 :         RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
    4057             :   }
    4058       61048 :   if (max_old_generation_size_in_mb != 0) {
    4059       29561 :     max_old_generation_size_ = max_old_generation_size_in_mb * MB;
    4060             :   }
    4061             : 
    4062             :   // If max space size flags are specified overwrite the configuration.
    4063       61048 :   if (FLAG_max_semi_space_size > 0) {
    4064         186 :     max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
    4065             :   }
    4066       61048 :   if (FLAG_max_old_space_size > 0) {
    4067             :     max_old_generation_size_ =
    4068          39 :         static_cast<size_t>(FLAG_max_old_space_size) * MB;
    4069             :   }
    4070             : 
    4071             :   if (Page::kPageSize > MB) {
    4072             :     max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
    4073             :     max_old_generation_size_ =
    4074             :         RoundUp<Page::kPageSize>(max_old_generation_size_);
    4075             :   }
    4076             : 
    4077       61048 :   if (FLAG_stress_compaction) {
    4078             :     // This will cause more frequent GCs when stressing.
    4079          96 :     max_semi_space_size_ = MB;
    4080             :   }
    4081             : 
    4082             :   // The new space size must be a power of two to support single-bit testing
    4083             :   // for containment.
    4084             :   max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
    4085       61048 :       static_cast<uint64_t>(max_semi_space_size_)));
    4086             : 
    4087       61048 :   if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
    4088             :     // Start with at least 1*MB semi-space on machines with a lot of memory.
    4089             :     initial_semispace_size_ =
    4090      121684 :         Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
    4091             :   }
    4092             : 
    4093       61048 :   if (FLAG_min_semi_space_size > 0) {
    4094             :     size_t initial_semispace_size =
    4095          35 :         static_cast<size_t>(FLAG_min_semi_space_size) * MB;
    4096          35 :     if (initial_semispace_size > max_semi_space_size_) {
    4097           5 :       initial_semispace_size_ = max_semi_space_size_;
    4098           5 :       if (FLAG_trace_gc) {
    4099             :         PrintIsolate(isolate_,
    4100             :                      "Min semi-space size cannot be more than the maximum "
    4101             :                      "semi-space size of %" PRIuS " MB\n",
    4102           0 :                      max_semi_space_size_ / MB);
    4103             :       }
    4104             :     } else {
    4105             :       initial_semispace_size_ =
    4106          30 :           RoundUp<Page::kPageSize>(initial_semispace_size);
    4107             :     }
    4108             :   }
    4109             : 
    4110      122098 :   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
    4111             : 
    4112       61049 :   if (FLAG_semi_space_growth_factor < 2) {
    4113           0 :     FLAG_semi_space_growth_factor = 2;
    4114             :   }
    4115             : 
    4116             :   // The old generation is paged and needs at least one page for each space.
    4117             :   int paged_space_count =
    4118             :       LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
    4119             :   initial_max_old_generation_size_ = max_old_generation_size_ =
    4120             :       Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
    4121      122098 :           max_old_generation_size_);
    4122             : 
    4123       61049 :   if (FLAG_initial_old_space_size > 0) {
    4124           0 :     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
    4125             :   } else {
    4126             :     initial_old_generation_size_ =
    4127       61049 :         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
    4128             :   }
    4129       61049 :   old_generation_allocation_limit_ = initial_old_generation_size_;
    4130             : 
    4131             :   // We rely on being able to allocate new arrays in paged spaces.
    4132             :   DCHECK(kMaxRegularHeapObjectSize >=
    4133             :          (JSArray::kSize +
    4134             :           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
    4135             :           AllocationMemento::kSize));
    4136             : 
    4137       61049 :   code_range_size_ = code_range_size_in_mb * MB;
    4138             : 
    4139       61049 :   configured_ = true;
    4140       61049 : }
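// Editor's sketch (not part of heap.cc; illustrative only): the configuration
// above rounds the semi-space size up to whole pages and then to a power of
// two (so containment can be tested with a single bit), and clamps the initial
// size so it never exceeds the maximum. A stand-alone version of that
// arithmetic; kPageSize and the KB constant are local assumptions.
#include <algorithm>
#include <cstdint>

constexpr uint64_t KB = 1024;
constexpr uint64_t kPageSize = 256 * KB;  // hypothetical page size.

uint64_t RoundUpToPage(uint64_t bytes) {
  return (bytes + kPageSize - 1) / kPageSize * kPageSize;
}

uint64_t RoundUpToPowerOfTwo(uint64_t x) {
  uint64_t p = 1;
  while (p < x) p <<= 1;
  return p;
}

struct SemiSpaceConfig {
  uint64_t initial_bytes;
  uint64_t max_bytes;
};

SemiSpaceConfig ConfigureSemiSpace(uint64_t requested_max_kb,
                                   uint64_t initial_hint_bytes) {
  uint64_t max = RoundUpToPage(requested_max_kb * KB);
  max = RoundUpToPowerOfTwo(max);  // enables the single-bit containment test.
  uint64_t initial = std::min(initial_hint_bytes, max);  // never exceed max.
  return {initial, max};
}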
    4141             : 
    4142             : 
    4143       97980 : void Heap::AddToRingBuffer(const char* string) {
    4144             :   size_t first_part =
    4145       97980 :       Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
    4146       97980 :   memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
    4147       97980 :   ring_buffer_end_ += first_part;
    4148       97980 :   if (first_part < strlen(string)) {
    4149       26298 :     ring_buffer_full_ = true;
    4150       26298 :     size_t second_part = strlen(string) - first_part;
    4151       26298 :     memcpy(trace_ring_buffer_, string + first_part, second_part);
    4152       26298 :     ring_buffer_end_ = second_part;
    4153             :   }
    4154       97980 : }
    4155             : 
    4156             : 
    4157          15 : void Heap::GetFromRingBuffer(char* buffer) {
    4158             :   size_t copied = 0;
    4159          15 :   if (ring_buffer_full_) {
    4160           0 :     copied = kTraceRingBufferSize - ring_buffer_end_;
    4161           0 :     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
    4162             :   }
    4163          15 :   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
    4164          15 : }
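// Editor's sketch (not part of heap.cc; illustrative only): the two functions
// above maintain a fixed-size trace buffer that wraps around on overflow and
// is read back oldest-byte-first once it has wrapped. A compact stand-alone
// equivalent; kRingSize is a made-up capacity.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>

constexpr size_t kRingSize = 512;  // hypothetical capacity.

struct TraceRing {
  char data[kRingSize] = {};
  size_t end = 0;     // next write position.
  bool full = false;  // set once the buffer has wrapped at least once.

  void Add(const char* s) {
    size_t len = std::strlen(s);
    assert(len <= kRingSize);  // assumed precondition, as in the code above.
    size_t first = std::min(len, kRingSize - end);
    std::memcpy(data + end, s, first);
    end += first;
    if (first < len) {  // wrap: spill the remainder to the front.
      full = true;
      size_t second = len - first;
      std::memcpy(data, s + first, second);
      end = second;
    }
  }

  // Copies the contents, oldest bytes first, into out (at least kRingSize).
  void Get(char* out) const {
    size_t copied = 0;
    if (full) {
      copied = kRingSize - end;
      std::memcpy(out, data + end, copied);
    }
    std::memcpy(out + copied, data, end);
  }
};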
    4165             : 
    4166       31485 : void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
    4167             : 
    4168          30 : void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
    4169          15 :   *stats->start_marker = HeapStats::kStartMarker;
    4170          15 :   *stats->end_marker = HeapStats::kEndMarker;
    4171          15 :   *stats->ro_space_size = read_only_space_->Size();
    4172          30 :   *stats->ro_space_capacity = read_only_space_->Capacity();
    4173          15 :   *stats->new_space_size = new_space_->Size();
    4174          30 :   *stats->new_space_capacity = new_space_->Capacity();
    4175          15 :   *stats->old_space_size = old_space_->SizeOfObjects();
    4176          30 :   *stats->old_space_capacity = old_space_->Capacity();
    4177          15 :   *stats->code_space_size = code_space_->SizeOfObjects();
    4178          30 :   *stats->code_space_capacity = code_space_->Capacity();
    4179          15 :   *stats->map_space_size = map_space_->SizeOfObjects();
    4180          30 :   *stats->map_space_capacity = map_space_->Capacity();
    4181          15 :   *stats->lo_space_size = lo_space_->Size();
    4182          15 :   *stats->code_lo_space_size = code_lo_space_->Size();
    4183          45 :   isolate_->global_handles()->RecordStats(stats);
    4184          30 :   *stats->memory_allocator_size = memory_allocator()->Size();
    4185             :   *stats->memory_allocator_capacity =
    4186          30 :       memory_allocator()->Size() + memory_allocator()->Available();
    4187          15 :   *stats->os_error = base::OS::GetLastError();
    4188          30 :   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
    4189          30 :   *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
    4190          15 :   if (take_snapshot) {
    4191           0 :     HeapIterator iterator(this);
    4192           0 :     for (HeapObject obj = iterator.next(); !obj.is_null();
    4193             :          obj = iterator.next()) {
    4194             :       InstanceType type = obj->map()->instance_type();
    4195             :       DCHECK(0 <= type && type <= LAST_TYPE);
    4196           0 :       stats->objects_per_type[type]++;
    4197           0 :       stats->size_per_type[type] += obj->Size();
    4198           0 :     }
    4199             :   }
    4200          15 :   if (stats->last_few_messages != nullptr)
    4201          15 :     GetFromRingBuffer(stats->last_few_messages);
    4202          15 :   if (stats->js_stacktrace != nullptr) {
    4203             :     FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
    4204             :     StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
    4205          15 :     if (gc_state() == Heap::NOT_IN_GC) {
    4206          15 :       isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
    4207             :     } else {
    4208           0 :       accumulator.Add("Cannot get stack trace in GC.");
    4209             :     }
    4210             :   }
    4211          15 : }
    4212             : 
    4213     1827842 : size_t Heap::OldGenerationSizeOfObjects() {
    4214             :   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
    4215             :   size_t total = 0;
    4216     9139211 :   for (PagedSpace* space = spaces.next(); space != nullptr;
    4217             :        space = spaces.next()) {
    4218     7311369 :     total += space->SizeOfObjects();
    4219             :   }
    4220     1827842 :   return total + lo_space_->SizeOfObjects();
    4221             : }
    4222             : 
    4223         170 : uint64_t Heap::PromotedExternalMemorySize() {
    4224             :   IsolateData* isolate_data = isolate()->isolate_data();
    4225      476235 :   if (isolate_data->external_memory_ <=
    4226             :       isolate_data->external_memory_at_last_mark_compact_) {
    4227             :     return 0;
    4228             :   }
    4229             :   return static_cast<uint64_t>(
    4230       24493 :       isolate_data->external_memory_ -
    4231       24493 :       isolate_data->external_memory_at_last_mark_compact_);
    4232             : }
    4233             : 
    4234        4383 : bool Heap::ShouldOptimizeForLoadTime() {
    4235           0 :   return isolate()->rail_mode() == PERFORMANCE_LOAD &&
    4236        4383 :          !AllocationLimitOvershotByLargeMargin() &&
    4237           0 :          MonotonicallyIncreasingTimeInMs() <
    4238        4383 :              isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
    4239             : }
    4240             : 
    4241             : // This predicate is called when an old generation space cannot allocate from
    4242             : // the free list and is about to add a new page. Returning false will cause a
    4243             : // major GC. It happens when the old generation allocation limit is reached and
    4244             : // - either we need to optimize for memory usage,
    4245             : // - or incremental marking is not in progress and we cannot start it.
    4246      489788 : bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
    4247      489788 :   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
    4248             :   // We reached the old generation allocation limit.
    4249             : 
    4250        1980 :   if (ShouldOptimizeForMemoryUsage()) return false;
    4251             : 
    4252        1945 :   if (ShouldOptimizeForLoadTime()) return true;
    4253             : 
    4254        1945 :   if (incremental_marking()->NeedsFinalization()) {
    4255        1568 :     return !AllocationLimitOvershotByLargeMargin();
    4256             :   }
    4257             : 
    4258         407 :   if (incremental_marking()->IsStopped() &&
    4259          30 :       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
    4260             :     // We cannot start incremental marking.
    4261             :     return false;
    4262             :   }
    4263         347 :   return true;
    4264             : }
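// Editor's sketch (not part of heap.cc; illustrative only): the predicate
// above decides whether an old-generation space may simply grow or whether a
// major GC should run first. A stand-alone restatement of that decision tree
// over a small struct of inputs; the struct and its field names are invented
// for this sketch.
#include <cstddef>

struct SlowAllocationState {
  bool always_allocate;             // e.g. inside an AlwaysAllocateScope.
  size_t old_generation_available;  // bytes left before the allocation limit.
  bool optimize_for_memory;         // memory-saving mode is active.
  bool optimize_for_load_time;      // a page load is in progress.
  bool marking_needs_finalization;  // incremental marking is nearly done.
  bool overshot_by_large_margin;    // limit exceeded by a large margin.
  bool marking_in_progress;         // incremental marking is running.
  bool marking_limit_reached;       // marking could be started right now.
};

bool ShouldExpandOldGeneration(const SlowAllocationState& s) {
  if (s.always_allocate || s.old_generation_available > 0) return true;
  // The old generation allocation limit has been reached.
  if (s.optimize_for_memory) return false;    // prefer a major GC.
  if (s.optimize_for_load_time) return true;  // defer GC while loading.
  if (s.marking_needs_finalization) return !s.overshot_by_large_margin;
  // Expand only if incremental marking is already running or can be started;
  // otherwise a major GC is the better choice.
  return s.marking_in_progress || s.marking_limit_reached;
}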
    4265             : 
    4266       81933 : Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
    4267       81933 :   if (ShouldReduceMemory() || FLAG_stress_compaction) {
    4268             :     return Heap::HeapGrowingMode::kMinimal;
    4269             :   }
    4270             : 
    4271       67031 :   if (ShouldOptimizeForMemoryUsage()) {
    4272             :     return Heap::HeapGrowingMode::kConservative;
    4273             :   }
    4274             : 
    4275      133972 :   if (memory_reducer()->ShouldGrowHeapSlowly()) {
    4276             :     return Heap::HeapGrowingMode::kSlow;
    4277             :   }
    4278             : 
    4279       66969 :   return Heap::HeapGrowingMode::kDefault;
    4280             : }
    4281             : 
    4282             : // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
    4283             : // The kNoLimit means that either incremental marking is disabled or it is too
    4284             : // early to start incremental marking.
    4285             : // The kSoftLimit means that incremental marking should be started soon.
    4286             : // The kHardLimit means that incremental marking should be started immediately.
    4287     1323017 : Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
    4288             :   // Code using an AlwaysAllocateScope assumes that the GC state does not
    4289             : // kNoLimit means that either incremental marking is disabled or it is too
    4290             : // early to start incremental marking.
    4291             : // kSoftLimit means that incremental marking should be started soon.
    4292             : // kHardLimit means that incremental marking should be started immediately.
    4293             :   }
    4294      819301 :   if (FLAG_stress_incremental_marking) {
    4295             :     return IncrementalMarkingLimit::kHardLimit;
    4296             :   }
    4297      791043 :   if (OldGenerationSizeOfObjects() <=
    4298             :       IncrementalMarking::kActivationThreshold) {
    4299             :     // The old generation is still too small to justify incremental marking.
    4300             :     return IncrementalMarkingLimit::kNoLimit;
    4301             :   }
    4302       39345 :   if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
    4303             :       HighMemoryPressure()) {
    4304             :     // If there is high memory pressure or stress testing is enabled, then
    4305             :     // start marking immediately.
    4306             :     return IncrementalMarkingLimit::kHardLimit;
    4307             :   }
    4308             : 
    4309       19657 :   if (FLAG_stress_marking > 0) {
    4310             :     double gained_since_last_gc =
    4311           0 :         PromotedSinceLastGC() +
    4312           0 :         (isolate()->isolate_data()->external_memory_ -
    4313           0 :          isolate()->isolate_data()->external_memory_at_last_mark_compact_);
    4314             :     double size_before_gc =
    4315           0 :         OldGenerationObjectsAndPromotedExternalMemorySize() -
    4316           0 :         gained_since_last_gc;
    4317           0 :     double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
    4318           0 :     if (bytes_to_limit > 0) {
    4319           0 :       double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
    4320             : 
    4321           0 :       if (FLAG_trace_stress_marking) {
    4322             :         isolate()->PrintWithTimestamp(
    4323             :             "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
    4324           0 :             current_percent);
    4325             :       }
    4326             : 
    4327           0 :       if (FLAG_fuzzer_gc_analysis) {
    4328             :         // Skips values >=100% since they already trigger marking.
    4329           0 :         if (current_percent < 100.0) {
    4330             :           max_marking_limit_reached_ =
    4331           0 :               std::max(max_marking_limit_reached_, current_percent);
    4332             :         }
    4333           0 :       } else if (static_cast<int>(current_percent) >=
    4334             :                  stress_marking_percentage_) {
    4335           0 :         stress_marking_percentage_ = NextStressMarkingLimit();
    4336           0 :         return IncrementalMarkingLimit::kHardLimit;
    4337             :       }
    4338             :     }
    4339             :   }
    4340             : 
    4341       19657 :   size_t old_generation_space_available = OldGenerationSpaceAvailable();
    4342             : 
    4343       39314 :   if (old_generation_space_available > new_space_->Capacity()) {
    4344             :     return IncrementalMarkingLimit::kNoLimit;
    4345             :   }
    4346        2469 :   if (ShouldOptimizeForMemoryUsage()) {
    4347             :     return IncrementalMarkingLimit::kHardLimit;
    4348             :   }
    4349        2438 :   if (ShouldOptimizeForLoadTime()) {
    4350             :     return IncrementalMarkingLimit::kNoLimit;
    4351             :   }
    4352        2438 :   if (old_generation_space_available == 0) {
    4353             :     return IncrementalMarkingLimit::kHardLimit;
    4354             :   }
    4355        2168 :   return IncrementalMarkingLimit::kSoftLimit;
    4356             : }
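// Editor's sketch (not part of heap.cc; illustrative only): the function above
// condenses many signals into a three-way answer. A reduced stand-alone model
// covering the main branches (the stress-testing and fuzzing paths are
// omitted); the MarkingLimitInputs struct is invented for this sketch.
#include <cstddef>

enum class MarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

struct MarkingLimitInputs {
  bool can_be_activated;            // marking enabled and heap in a fit state.
  bool always_allocate;             // inside an AlwaysAllocateScope.
  size_t old_generation_size;       // current old-generation size in bytes.
  size_t activation_threshold;      // minimum size before marking pays off.
  bool high_memory_pressure;
  size_t old_generation_available;  // bytes left before the allocation limit.
  size_t new_space_capacity;
  bool optimize_for_memory;
  bool optimize_for_load_time;
};

MarkingLimit ReachedMarkingLimit(const MarkingLimitInputs& in) {
  if (!in.can_be_activated || in.always_allocate)
    return MarkingLimit::kNoLimit;  // disabled, or no marking steps allowed.
  if (in.old_generation_size <= in.activation_threshold)
    return MarkingLimit::kNoLimit;  // the heap is still too small.
  if (in.high_memory_pressure) return MarkingLimit::kHardLimit;
  if (in.old_generation_available > in.new_space_capacity)
    return MarkingLimit::kNoLimit;  // plenty of headroom left.
  if (in.optimize_for_memory) return MarkingLimit::kHardLimit;
  if (in.optimize_for_load_time) return MarkingLimit::kNoLimit;
  if (in.old_generation_available == 0) return MarkingLimit::kHardLimit;
  return MarkingLimit::kSoftLimit;  // close to the limit: start marking soon.
}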
    4357             : 
    4358        8120 : void Heap::EnableInlineAllocation() {
    4359        8120 :   if (!inline_allocation_disabled_) return;
    4360        8110 :   inline_allocation_disabled_ = false;
    4361             : 
    4362             :   // Update inline allocation limit for new space.
    4363        8110 :   new_space()->UpdateInlineAllocationLimit(0);
    4364             : }
    4365             : 
    4366             : 
    4367       16268 : void Heap::DisableInlineAllocation() {
    4368        8134 :   if (inline_allocation_disabled_) return;
    4369        8134 :   inline_allocation_disabled_ = true;
    4370             : 
    4371             :   // Update inline allocation limit for new space.
    4372        8134 :   new_space()->UpdateInlineAllocationLimit(0);
    4373             : 
    4374             :   // Update inline allocation limit for old spaces.
    4375             :   PagedSpaces spaces(this);
    4376        8134 :   CodeSpaceMemoryModificationScope modification_scope(this);
    4377       32536 :   for (PagedSpace* space = spaces.next(); space != nullptr;
    4378             :        space = spaces.next()) {
    4379       24402 :     space->FreeLinearAllocationArea();
    4380        8134 :   }
    4381             : }
    4382             : 
    4383       43939 : HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
    4384             :   // Code objects which should stay at a fixed address are allocated either
    4385             :   // in the first page of code space or in large object space; during
    4386             :   // snapshot creation, the containing page is instead marked as immovable.
    4387             :   DCHECK(!heap_object.is_null());
    4388             :   DCHECK(code_space_->Contains(heap_object));
    4389             :   DCHECK_GE(object_size, 0);
    4390       43939 :   if (!Heap::IsImmovable(heap_object)) {
    4391       87868 :     if (isolate()->serializer_enabled() ||
    4392       43931 :         code_space_->first_page()->Contains(heap_object->address())) {
    4393             :       MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
    4394             :     } else {
    4395             :       // Discard the first code allocation, which was on a page where it could
    4396             :       // be moved.
    4397             :       CreateFillerObjectAt(heap_object->address(), object_size,
    4398       43931 :                            ClearRecordedSlots::kNo);
    4399       43931 :       heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
    4400             :       UnprotectAndRegisterMemoryChunk(heap_object);
    4401             :       ZapCodeObject(heap_object->address(), object_size);
    4402       43931 :       OnAllocationEvent(heap_object, object_size);
    4403             :     }
    4404             :   }
    4405       43939 :   return heap_object;
    4406             : }
    4407             : 
    4408   309059017 : HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
    4409             :                                            AllocationAlignment alignment) {
    4410   309059017 :   HeapObject result;
    4411   309059017 :   AllocationResult alloc = AllocateRaw(size, space, alignment);
    4412   309058952 :   if (alloc.To(&result)) {
    4413             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4414   309038950 :     return result;
    4415             :   }
    4416             :   // Allow two GCs before panicking; in new space this will almost always succeed.
    4417         115 :   for (int i = 0; i < 2; i++) {
    4418             :     CollectGarbage(alloc.RetrySpace(),
    4419       19959 :                    GarbageCollectionReason::kAllocationFailure);
    4420       19959 :     alloc = AllocateRaw(size, space, alignment);
    4421       19959 :     if (alloc.To(&result)) {
    4422             :       DCHECK(result != ReadOnlyRoots(this).exception());
    4423       19844 :       return result;
    4424             :     }
    4425             :   }
    4426          22 :   return HeapObject();
    4427             : }
    4428             : 
    4429   307479898 : HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
    4430             :                                             AllocationAlignment alignment) {
    4431             :   AllocationResult alloc;
    4432   307479898 :   HeapObject result = AllocateRawWithLightRetry(size, space, alignment);
    4433   307479615 :   if (!result.is_null()) return result;
    4434             : 
    4435          22 :   isolate()->counters()->gc_last_resort_from_handles()->Increment();
    4436          22 :   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
    4437             :   {
    4438             :     AlwaysAllocateScope scope(isolate());
    4439          22 :     alloc = AllocateRaw(size, space, alignment);
    4440             :   }
    4441          22 :   if (alloc.To(&result)) {
    4442             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4443          22 :     return result;
    4444             :   }
    4445             :   // TODO(1181417): Fix this.
    4446           0 :   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
    4447             :   return HeapObject();
    4448             : }
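// Editor's sketch (not part of heap.cc; illustrative only): both allocation
// paths above share one escalation ladder: try, run up to two GCs with a retry
// after each, then collect all available garbage and make a final attempt
// before giving up. A stand-alone skeleton of that ladder with the allocation
// and collection steps abstracted behind std::function parameters.
#include <functional>
#include <optional>

template <typename T>
std::optional<T> AllocateWithRetry(
    const std::function<std::optional<T>()>& allocate,
    const std::function<void()>& collect_garbage,
    const std::function<void()>& collect_all_available_garbage) {
  if (auto result = allocate()) return result;  // fast path: no GC needed.
  for (int i = 0; i < 2; ++i) {                 // light retries: two GCs.
    collect_garbage();
    if (auto result = allocate()) return result;
  }
  collect_all_available_garbage();              // last-resort collection.
  if (auto result = allocate()) return result;
  return std::nullopt;  // the caller decides whether this is fatal.
}
// Callers would typically name T explicitly, e.g. AllocateWithRetry<MyObject>.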
    4449             : 
    4450             : // TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
    4451             : // parameter and just do what's necessary.
    4452       43935 : HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
    4453       43931 :   AllocationResult alloc = code_lo_space()->AllocateRaw(size);
    4454       43931 :   HeapObject result;
    4455       43931 :   if (alloc.To(&result)) {
    4456             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4457       43927 :     return result;
    4458             :   }
    4459             :   // Two GCs before panicking.
    4460           0 :   for (int i = 0; i < 2; i++) {
    4461             :     CollectGarbage(alloc.RetrySpace(),
    4462           4 :                    GarbageCollectionReason::kAllocationFailure);
    4463           4 :     alloc = code_lo_space()->AllocateRaw(size);
    4464           4 :     if (alloc.To(&result)) {
    4465             :       DCHECK(result != ReadOnlyRoots(this).exception());
    4466           4 :       return result;
    4467             :     }
    4468             :   }
    4469           0 :   isolate()->counters()->gc_last_resort_from_handles()->Increment();
    4470           0 :   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
    4471             :   {
    4472             :     AlwaysAllocateScope scope(isolate());
    4473           0 :     alloc = code_lo_space()->AllocateRaw(size);
    4474             :   }
    4475           0 :   if (alloc.To(&result)) {
    4476             :     DCHECK(result != ReadOnlyRoots(this).exception());
    4477           0 :     return result;
    4478             :   }
    4479             :   // TODO(1181417): Fix this.
    4480           0 :   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
    4481             :   return HeapObject();
    4482             : }
    4483             : 
    4484      183146 : void Heap::SetUp() {
    4485             : #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
    4486             :   allocation_timeout_ = NextAllocationTimeout();
    4487             : #endif
    4488             : 
    4489             :   // Initialize heap spaces and initial maps and objects.
    4490             :   //
    4491             :   // If the heap is not yet configured (e.g. through the API), configure it.
    4492             :   // Configuration is based on the flags new-space-size (really the semispace
    4493             :   // size) and old-space-size if set or the initial values of semispace_size_
    4494             :   // size) and old-space-size if set, or the initial values of semispace_size_
    4495       61048 :   if (!configured_) ConfigureHeapDefault();
    4496             : 
    4497             :   mmap_region_base_ =
    4498       61048 :       reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
    4499       61049 :       ~kMmapRegionMask;
    4500             : 
    4501             :   // Set up memory allocator.
    4502             :   memory_allocator_.reset(
    4503      183147 :       new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
    4504             : 
    4505       61049 :   store_buffer_.reset(new StoreBuffer(this));
    4506             : 
    4507       61048 :   heap_controller_.reset(new HeapController(this));
    4508             : 
    4509       61048 :   mark_compact_collector_.reset(new MarkCompactCollector(this));
    4510             : 
    4511       61049 :   scavenger_collector_.reset(new ScavengerCollector(this));
    4512             : 
    4513             :   incremental_marking_.reset(
    4514             :       new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
    4515       61049 :                              mark_compact_collector_->weak_objects()));
    4516             : 
    4517       61049 :   if (FLAG_concurrent_marking || FLAG_parallel_marking) {
    4518             :     MarkCompactCollector::MarkingWorklist* marking_worklist =
    4519             :         mark_compact_collector_->marking_worklist();
    4520             :     concurrent_marking_.reset(new ConcurrentMarking(
    4521             :         this, marking_worklist->shared(), marking_worklist->on_hold(),
    4522       60939 :         mark_compact_collector_->weak_objects(), marking_worklist->embedder()));
    4523             :   } else {
    4524             :     concurrent_marking_.reset(
    4525         110 :         new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));
    4526             :   }
    4527             : 
    4528      488392 :   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    4529      488392 :     space_[i] = nullptr;
    4530             :   }
    4531             : 
    4532       61049 :   space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
    4533             :   space_[NEW_SPACE] = new_space_ =
    4534       61049 :       new NewSpace(this, memory_allocator_->data_page_allocator(),
    4535       61049 :                    initial_semispace_size_, max_semi_space_size_);
    4536       61049 :   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
    4537       61049 :   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
    4538       61049 :   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
    4539       61049 :   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
    4540             :   space_[NEW_LO_SPACE] = new_lo_space_ =
    4541      122098 :       new NewLargeObjectSpace(this, new_space_->Capacity());
    4542       61049 :   space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
    4543             : 
    4544     4639724 :   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
    4545             :        i++) {
    4546     4578675 :     deferred_counters_[i] = 0;
    4547             :   }
    4548             : 
    4549       61049 :   tracer_.reset(new GCTracer(this));
    4550             : #ifdef ENABLE_MINOR_MC
    4551       61049 :   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
    4552             : #else
    4553             :   minor_mark_compact_collector_ = nullptr;
    4554             : #endif  // ENABLE_MINOR_MC
    4555       61049 :   array_buffer_collector_.reset(new ArrayBufferCollector(this));
    4556       61049 :   gc_idle_time_handler_.reset(new GCIdleTimeHandler());
    4557       61049 :   memory_reducer_.reset(new MemoryReducer(this));
    4558       61049 :   if (V8_UNLIKELY(FLAG_gc_stats)) {
    4559           0 :     live_object_stats_.reset(new ObjectStats(this));
    4560           0 :     dead_object_stats_.reset(new ObjectStats(this));
    4561             :   }
    4562       61049 :   local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
    4563             : 
    4564      122098 :   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
    4565      122098 :   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
    4566             : 
    4567       61049 :   store_buffer()->SetUp();
    4568             : 
    4569       61049 :   mark_compact_collector()->SetUp();
    4570             : #ifdef ENABLE_MINOR_MC
    4571       61049 :   if (minor_mark_compact_collector() != nullptr) {
    4572       61049 :     minor_mark_compact_collector()->SetUp();
    4573             :   }
    4574             : #endif  // ENABLE_MINOR_MC
    4575             : 
    4576       61049 :   if (FLAG_idle_time_scavenge) {
    4577       61049 :     scavenge_job_.reset(new ScavengeJob());
    4578             :     idle_scavenge_observer_.reset(new IdleScavengeObserver(
    4579       61049 :         *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
    4580      122098 :     new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
    4581             :   }
    4582             : 
    4583             :   SetGetExternallyAllocatedMemoryInBytesCallback(
    4584             :       DefaultGetExternallyAllocatedMemoryInBytesCallback);
    4585             : 
    4586       61049 :   if (FLAG_stress_marking > 0) {
    4587           0 :     stress_marking_percentage_ = NextStressMarkingLimit();
    4588           0 :     stress_marking_observer_ = new StressMarkingObserver(*this);
    4589             :     AddAllocationObserversToAllSpaces(stress_marking_observer_,
    4590           0 :                                       stress_marking_observer_);
    4591             :   }
    4592       61049 :   if (FLAG_stress_scavenge > 0) {
    4593           0 :     stress_scavenge_observer_ = new StressScavengeObserver(*this);
    4594           0 :     new_space()->AddAllocationObserver(stress_scavenge_observer_);
    4595             :   }
    4596             : 
    4597       61049 :   write_protect_code_memory_ = FLAG_write_protect_code_memory;
    4598       61049 : }
    4599             : 
    4600       60989 : void Heap::InitializeHashSeed() {
    4601             :   DCHECK(!deserialization_complete_);
    4602             :   uint64_t new_hash_seed;
    4603       60989 :   if (FLAG_hash_seed == 0) {
    4604       60919 :     int64_t rnd = isolate()->random_number_generator()->NextInt64();
    4605       60918 :     new_hash_seed = static_cast<uint64_t>(rnd);
    4606             :   } else {
    4607          70 :     new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
    4608             :   }
    4609             :   ReadOnlyRoots(this).hash_seed()->copy_in(
    4610             :       0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
    4611       60988 : }
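// Editor's sketch (not part of heap.cc; illustrative only): the hash seed is
// taken from the --hash-seed flag when it is non-zero and drawn from a random
// number generator otherwise. A small stand-alone version of that choice;
// flag_hash_seed and the <random> engine are stand-ins for V8's flag and RNG.
#include <cstdint>
#include <random>

uint64_t ChooseHashSeed(uint64_t flag_hash_seed) {
  if (flag_hash_seed != 0) return flag_hash_seed;  // deterministic override.
  std::random_device rd;
  std::mt19937_64 rng(rd());
  return rng();  // otherwise use a fresh 64-bit seed.
}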
    4612             : 
    4613     6966404 : void Heap::SetStackLimits() {
    4614             :   DCHECK_NOT_NULL(isolate_);
    4615             :   DCHECK(isolate_ == isolate());
    4616             :   // On 64 bit machines, pointers are generally out of range of Smis.  We write
    4617             :   // something that looks like an out of range Smi to the GC.
    4618             : 
    4619             :   // Set up the special root array entries containing the stack limits.
    4620             :   // These are actually addresses, but the tag makes the GC ignore it.
    4621             :   roots_table()[RootIndex::kStackLimit] =
    4622    13932818 :       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
    4623             :   roots_table()[RootIndex::kRealStackLimit] =
    4624     6966409 :       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
    4625     6966409 : }
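// Editor's sketch (not part of heap.cc; illustrative only): the stack limits
// above are raw addresses stored in the root table with their low bits forced
// to the Smi tag, so a GC scanning those slots treats them as (out-of-range)
// small integers rather than heap pointers. A tiny stand-alone illustration of
// that masking; the one-bit tag layout is an assumption made for the sketch.
#include <cstdint>

constexpr uintptr_t kSmiTag = 0;      // assumed: Smis carry a zero low bit.
constexpr uintptr_t kSmiTagMask = 1;  // assumed: a single tag bit.

// Clear the tag bits of an address and stamp in the Smi tag so the value no
// longer looks like a heap pointer to a scanning GC.
uintptr_t DisguiseAddressAsSmi(uintptr_t address) {
  return (address & ~kSmiTagMask) | kSmiTag;
}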
    4626             : 
    4627         256 : void Heap::ClearStackLimits() {
    4628         256 :   roots_table()[RootIndex::kStackLimit] = kNullAddress;
    4629         256 :   roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
    4630         256 : }
    4631             : 
    4632           0 : int Heap::NextAllocationTimeout(int current_timeout) {
    4633           0 :   if (FLAG_random_gc_interval > 0) {
    4634             :     // If the current timeout hasn't reached 0, the GC was caused by something
    4635             :     // other than the --stress-atomic-gc flag and we don't update the timeout.
    4636           0 :     if (current_timeout <= 0) {
    4637           0 :       return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
    4638             :     } else {
    4639             :       return current_timeout;
    4640             :     }
    4641             :   }
    4642           0 :   return FLAG_gc_interval;
    4643             : }
    4644             : 
    4645           0 : void Heap::PrintAllocationsHash() {
    4646           0 :   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
    4647           0 :   PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
    4648           0 : }
    4649             : 
    4650           0 : void Heap::PrintMaxMarkingLimitReached() {
    4651             :   PrintF("\n### Maximum marking limit reached = %.02lf\n",
    4652           0 :          max_marking_limit_reached_);
    4653           0 : }
    4654             : 
    4655           0 : void Heap::PrintMaxNewSpaceSizeReached() {
    4656             :   PrintF("\n### Maximum new space size reached = %.02lf\n",
    4657           0 :          stress_scavenge_observer_->MaxNewSpaceSizeReached());
    4658           0 : }
    4659             : 
    4660           0 : int Heap::NextStressMarkingLimit() {
    4661           0 :   return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
    4662             : }
    4663             : 
    4664      122098 : void Heap::NotifyDeserializationComplete() {
    4665             :   PagedSpaces spaces(this);
    4666      244196 :   for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
    4667      366294 :     if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
    4668             : #ifdef DEBUG
    4669             :     // All pages right after bootstrapping must be marked as never-evacuate.
    4670             :     for (Page* p : *s) {
    4671             :       DCHECK(p->NeverEvacuate());
    4672             :     }
    4673             : #endif  // DEBUG
    4674             :   }
    4675             : 
    4676       61049 :   read_only_space()->MarkAsReadOnly();
    4677       61049 :   deserialization_complete_ = true;
    4678       61049 : }
    4679             : 
    4680       89757 : void Heap::NotifyBootstrapComplete() {
    4681             :   // This function is invoked for each native context creation. We are
    4682             :   // interested only in the first native context.
    4683       89757 :   if (old_generation_capacity_after_bootstrap_ == 0) {
    4684       58235 :     old_generation_capacity_after_bootstrap_ = OldGenerationCapacity();
    4685             :   }
    4686       89757 : }
    4687             : 
    4688      489547 : void Heap::NotifyOldGenerationExpansion() {
    4689             :   const size_t kMemoryReducerActivationThreshold = 1 * MB;
    4690      695129 :   if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
    4691       84778 :       OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
    4692      507415 :                                      kMemoryReducerActivationThreshold &&
    4693             :       FLAG_memory_reducer_for_small_heaps) {
    4694             :     MemoryReducer::Event event;
    4695       17868 :     event.type = MemoryReducer::kPossibleGarbage;
    4696       17868 :     event.time_ms = MonotonicallyIncreasingTimeInMs();
    4697       17868 :     memory_reducer()->NotifyPossibleGarbage(event);
    4698             :   }
    4699      489547 : }
    4700             : 
    4701         140 : void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
    4702             :   DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
    4703         140 :   local_embedder_heap_tracer()->SetRemoteTracer(tracer);
    4704         140 : }
    4705             : 
    4706           0 : EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
    4707           0 :   return local_embedder_heap_tracer()->remote_tracer();
    4708             : }
    4709             : 
    4710           5 : void Heap::RegisterExternallyReferencedObject(Address* location) {
    4711             :   // The embedder is not aware of whether numbers are materialized as heap
    4712             :   // objects or just passed around as Smis.
    4713           5 :   Object object(*location);
    4714           5 :   if (!object->IsHeapObject()) return;
    4715             :   HeapObject heap_object = HeapObject::cast(object);
    4716             :   DCHECK(Contains(heap_object));
    4717          10 :   if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
    4718           0 :     incremental_marking()->WhiteToGreyAndPush(heap_object);
    4719             :   } else {
    4720             :     DCHECK(mark_compact_collector()->in_use());
    4721             :     mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
    4722             :   }
    4723             : }
    4724             : 
    4725      122068 : void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
    4726             : 
    4727      122068 : void Heap::TearDown() {
    4728             :   DCHECK_EQ(gc_state_, TEAR_DOWN);
    4729             : #ifdef VERIFY_HEAP
    4730             :   if (FLAG_verify_heap) {
    4731             :     Verify();
    4732             :   }
    4733             : #endif
    4734             : 
    4735       61034 :   UpdateMaximumCommitted();
    4736             : 
    4737       61034 :   if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
    4738           0 :     PrintAllocationsHash();
    4739             :   }
    4740             : 
    4741       61034 :   if (FLAG_fuzzer_gc_analysis) {
    4742           0 :     if (FLAG_stress_marking > 0) {
    4743             :       PrintMaxMarkingLimitReached();
    4744             :     }
    4745           0 :     if (FLAG_stress_scavenge > 0) {
    4746           0 :       PrintMaxNewSpaceSizeReached();
    4747             :     }
    4748             :   }
    4749             : 
    4750       61034 :   if (FLAG_idle_time_scavenge) {
    4751      122068 :     new_space()->RemoveAllocationObserver(idle_scavenge_observer_.get());
    4752             :     idle_scavenge_observer_.reset();
    4753             :     scavenge_job_.reset();
    4754             :   }
    4755             : 
    4756       61034 :   if (FLAG_stress_marking > 0) {
    4757             :     RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
    4758           0 :                                            stress_marking_observer_);
    4759           0 :     delete stress_marking_observer_;
    4760           0 :     stress_marking_observer_ = nullptr;
    4761             :   }
    4762       61034 :   if (FLAG_stress_scavenge > 0) {
    4763           0 :     new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
    4764           0 :     delete stress_scavenge_observer_;
    4765           0 :     stress_scavenge_observer_ = nullptr;
    4766             :   }
    4767             : 
    4768             :   heap_controller_.reset();
    4769             : 
    4770       61034 :   if (mark_compact_collector_) {
    4771       61034 :     mark_compact_collector_->TearDown();
    4772             :     mark_compact_collector_.reset();
    4773             :   }
    4774             : 
    4775             : #ifdef ENABLE_MINOR_MC
    4776       61034 :   if (minor_mark_compact_collector_ != nullptr) {
    4777       61034 :     minor_mark_compact_collector_->TearDown();
    4778       61034 :     delete minor_mark_compact_collector_;
    4779       61034 :     minor_mark_compact_collector_ = nullptr;
    4780             :   }
    4781             : #endif  // ENABLE_MINOR_MC
    4782             : 
    4783             :   scavenger_collector_.reset();
    4784             :   array_buffer_collector_.reset();
    4785       61034 :   incremental_marking_.reset();
    4786             :   concurrent_marking_.reset();
    4787             : 
    4788             :   gc_idle_time_handler_.reset();
    4789             : 
    4790       61034 :   if (memory_reducer_ != nullptr) {
    4791       61034 :     memory_reducer_->TearDown();
    4792             :     memory_reducer_.reset();
    4793             :   }
    4794             : 
    4795             :   live_object_stats_.reset();
    4796             :   dead_object_stats_.reset();
    4797             : 
    4798             :   local_embedder_heap_tracer_.reset();
    4799             : 
    4800       61034 :   external_string_table_.TearDown();
    4801             : 
    4802             :   // Tear down all ArrayBuffers before tearing down the heap since  their
    4803             :   // byte_length may be a HeapNumber which is required for freeing the backing
    4804             :   // store.
    4805       61034 :   ArrayBufferTracker::TearDown(this);
    4806             : 
    4807             :   tracer_.reset();
    4808             : 
    4809      488272 :   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    4810      488272 :     delete space_[i];
    4811      488272 :     space_[i] = nullptr;
    4812             :   }
    4813             : 
    4814       61034 :   store_buffer()->TearDown();
    4815             : 
    4816       61034 :   memory_allocator()->TearDown();
    4817             : 
    4818             :   StrongRootsList* next = nullptr;
    4819      122068 :   for (StrongRootsList* list = strong_roots_list_; list; list = next) {
    4820           0 :     next = list->next;
    4821           0 :     delete list;
    4822             :   }
    4823       61034 :   strong_roots_list_ = nullptr;
    4824             : 
    4825             :   store_buffer_.reset();
    4826             :   memory_allocator_.reset();
    4827       61034 : }
    4828             : 
    4829          39 : void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
    4830             :                                  GCType gc_type, void* data) {
    4831             :   DCHECK_NOT_NULL(callback);
    4832             :   DCHECK(gc_prologue_callbacks_.end() ==
    4833             :          std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
    4834             :                    GCCallbackTuple(callback, gc_type, data)));
    4835          39 :   gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
    4836          39 : }
    4837             : 
    4838          35 : void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
    4839             :                                     void* data) {
    4840             :   DCHECK_NOT_NULL(callback);
    4841          70 :   for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
    4842         105 :     if (gc_prologue_callbacks_[i].callback == callback &&
    4843          35 :         gc_prologue_callbacks_[i].data == data) {
    4844             :       gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
    4845             :       gc_prologue_callbacks_.pop_back();
    4846          35 :       return;
    4847             :     }
    4848             :   }
    4849           0 :   UNREACHABLE();
    4850             : }
    4851             : 
    4852       69244 : void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
    4853             :                                  GCType gc_type, void* data) {
    4854             :   DCHECK_NOT_NULL(callback);
    4855             :   DCHECK(gc_epilogue_callbacks_.end() ==
    4856             :          std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
    4857             :                    GCCallbackTuple(callback, gc_type, data)));
    4858       69244 :   gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
    4859       69244 : }
    4860             : 
    4861        8195 : void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
    4862             :                                     void* data) {
    4863             :   DCHECK_NOT_NULL(callback);
    4864       32780 :   for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
    4865       40975 :     if (gc_epilogue_callbacks_[i].callback == callback &&
    4866        8195 :         gc_epilogue_callbacks_[i].data == data) {
    4867             :       gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
    4868             :       gc_epilogue_callbacks_.pop_back();
    4869        8195 :       return;
    4870             :     }
    4871             :   }
    4872           0 :   UNREACHABLE();
    4873             : }
    4874             : 
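Both Remove* helpers above delete an entry by overwriting it with the vector's last element and popping the back, which avoids shifting the remaining callbacks but does not preserve registration order. A minimal stand-alone sketch of that idiom, with hypothetical names:

#include <cstddef>
#include <vector>

// Remove the first element equal to `value`: overwrite it with the last
// element, then shrink by one. Linear search, constant-time removal,
// element order is not preserved.
template <typename T>
bool SwapRemove(std::vector<T>* items, const T& value) {
  for (size_t i = 0; i < items->size(); i++) {
    if ((*items)[i] == value) {
      (*items)[i] = items->back();
      items->pop_back();
      return true;
    }
  }
  return false;
}
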
    4875             : namespace {
    4876         382 : Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
    4877             :                                            Handle<WeakArrayList> array,
    4878             :                                            PretenureFlag pretenure) {
    4879         382 :   if (array->length() == 0) {
    4880           0 :     return array;
    4881             :   }
    4882         382 :   int new_length = array->CountLiveWeakReferences();
    4883         382 :   if (new_length == array->length()) {
    4884         277 :     return array;
    4885             :   }
    4886             : 
    4887             :   Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
    4888             :       heap->isolate(),
    4889             :       handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
    4890         105 :       new_length, pretenure);
    4891             :   // Allocation might have caused GC and turned some of the elements into
    4892             :   // cleared weak heap objects. Count the number of live references again and
    4893             :   // fill in the new array.
    4894             :   int copy_to = 0;
    4895       19840 :   for (int i = 0; i < array->length(); i++) {
    4896        9815 :     MaybeObject element = array->Get(i);
    4897       10135 :     if (element->IsCleared()) continue;
    4898       18990 :     new_array->Set(copy_to++, element);
    4899             :   }
    4900             :   new_array->set_length(copy_to);
    4901         105 :   return new_array;
    4902             : }
    4903             : 
    4904             : }  // anonymous namespace
    4905             : 
    4906         191 : void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
    4907             :   // Find known PrototypeUsers and compact them.
    4908             :   std::vector<Handle<PrototypeInfo>> prototype_infos;
    4909             :   {
    4910         191 :     HeapIterator iterator(this);
    4911     3156082 :     for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
    4912     1577850 :       if (o->IsPrototypeInfo()) {
    4913             :         PrototypeInfo prototype_info = PrototypeInfo::cast(o);
    4914       25804 :         if (prototype_info->prototype_users()->IsWeakArrayList()) {
    4915          25 :           prototype_infos.emplace_back(handle(prototype_info, isolate()));
    4916             :         }
    4917             :       }
    4918         191 :     }
    4919             :   }
    4920         407 :   for (auto& prototype_info : prototype_infos) {
    4921             :     Handle<WeakArrayList> array(
    4922             :         WeakArrayList::cast(prototype_info->prototype_users()), isolate());
    4923             :     DCHECK_IMPLIES(pretenure == TENURED,
    4924             :                    InOldSpace(*array) ||
    4925             :                        *array == ReadOnlyRoots(this).empty_weak_array_list());
    4926             :     WeakArrayList new_array = PrototypeUsers::Compact(
    4927          25 :         array, this, JSObject::PrototypeRegistryCompactionCallback, pretenure);
    4928          25 :     prototype_info->set_prototype_users(new_array);
    4929             :   }
    4930             : 
    4931             :   // Find known WeakArrayLists and compact them.
    4932         191 :   Handle<WeakArrayList> scripts(script_list(), isolate());
    4933             :   DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*scripts));
    4934         191 :   scripts = CompactWeakArrayList(this, scripts, pretenure);
    4935         191 :   set_script_list(*scripts);
    4936             : 
    4937             :   Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
    4938         191 :                                        isolate());
    4939             :   DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*no_script_list));
    4940         191 :   no_script_list = CompactWeakArrayList(this, no_script_list, pretenure);
    4941         191 :   set_noscript_shared_function_infos(*no_script_list);
    4942         191 : }
    4943             : 
    4944      145665 : void Heap::AddRetainedMap(Handle<Map> map) {
    4945      145665 :   if (map->is_in_retained_map_list()) {
    4946      145665 :     return;
    4947             :   }
    4948       46021 :   Handle<WeakArrayList> array(retained_maps(), isolate());
    4949       46021 :   if (array->IsFull()) {
    4950       11903 :     CompactRetainedMaps(*array);
    4951             :   }
    4952             :   array =
    4953       46021 :       WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
    4954             :   array = WeakArrayList::AddToEnd(
    4955             :       isolate(), array,
    4956       92042 :       MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
    4957       92042 :   if (*array != retained_maps()) {
    4958       15917 :     set_retained_maps(*array);
    4959             :   }
    4960             :   map->set_is_in_retained_map_list(true);
    4961             : }
    4962             : 
    4963       11903 : void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
    4964             :   DCHECK_EQ(retained_maps, this->retained_maps());
    4965             :   int length = retained_maps->length();
    4966             :   int new_length = 0;
    4967             :   int new_number_of_disposed_maps = 0;
    4968             :   // This loop compacts the array by removing cleared weak cells.
    4969       50469 :   for (int i = 0; i < length; i += 2) {
    4970       38566 :     MaybeObject maybe_object = retained_maps->Get(i);
    4971       38566 :     if (maybe_object->IsCleared()) {
    4972        9049 :       continue;
    4973             :     }
    4974             : 
    4975             :     DCHECK(maybe_object->IsWeak());
    4976             : 
    4977       29517 :     MaybeObject age = retained_maps->Get(i + 1);
    4978             :     DCHECK(age->IsSmi());
    4979       29517 :     if (i != new_length) {
    4980        3294 :       retained_maps->Set(new_length, maybe_object);
    4981        3294 :       retained_maps->Set(new_length + 1, age);
    4982             :     }
    4983       29517 :     if (i < number_of_disposed_maps_) {
    4984          73 :       new_number_of_disposed_maps += 2;
    4985             :     }
    4986       29517 :     new_length += 2;
    4987             :   }
    4988       11903 :   number_of_disposed_maps_ = new_number_of_disposed_maps;
    4989             :   HeapObject undefined = ReadOnlyRoots(this).undefined_value();
    4990       30001 :   for (int i = new_length; i < length; i++) {
    4991       18098 :     retained_maps->Set(i, HeapObjectReference::Strong(undefined));
    4992             :   }
    4993       11903 :   if (new_length != length) retained_maps->set_length(new_length);
    4994       11903 : }
    4995             : 
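CompactWeakArrayList and CompactRetainedMaps above share the same shape: walk the array, skip cleared weak references, copy the survivors toward the front, and shrink the length at the end (CompactRetainedMaps moves entries in (map, age) pairs and also pads the freed tail with undefined). A minimal sketch of that pattern over a plain vector, assuming a hypothetical is_cleared predicate:

#include <cstddef>
#include <vector>

// Keep only the elements for which `is_cleared` returns false, preserving
// their relative order, and return the new length.
template <typename T, typename Pred>
size_t CompactLive(std::vector<T>* items, Pred is_cleared) {
  size_t copy_to = 0;
  for (size_t i = 0; i < items->size(); i++) {
    if (is_cleared((*items)[i])) continue;
    if (i != copy_to) (*items)[copy_to] = (*items)[i];
    copy_to++;
  }
  items->resize(copy_to);
  return copy_to;
}
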
    4996           0 : void Heap::FatalProcessOutOfMemory(const char* location) {
    4997           0 :   v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
    4998             : }
    4999             : 
    5000             : #ifdef DEBUG
    5001             : 
    5002             : class PrintHandleVisitor : public RootVisitor {
    5003             :  public:
    5004             :   void VisitRootPointers(Root root, const char* description,
    5005             :                          FullObjectSlot start, FullObjectSlot end) override {
    5006             :     for (FullObjectSlot p = start; p < end; ++p)
    5007             :       PrintF("  handle %p to %p\n", p.ToVoidPtr(),
    5008             :              reinterpret_cast<void*>((*p).ptr()));
    5009             :   }
    5010             : };
    5011             : 
    5012             : 
    5013             : void Heap::PrintHandles() {
    5014             :   PrintF("Handles:\n");
    5015             :   PrintHandleVisitor v;
    5016             :   isolate_->handle_scope_implementer()->Iterate(&v);
    5017             : }
    5018             : 
    5019             : #endif
    5020             : 
    5021             : class CheckHandleCountVisitor : public RootVisitor {
    5022             :  public:
    5023           0 :   CheckHandleCountVisitor() : handle_count_(0) {}
    5024           0 :   ~CheckHandleCountVisitor() override {
    5025           0 :     CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
    5026           0 :   }
    5027           0 :   void VisitRootPointers(Root root, const char* description,
    5028             :                          FullObjectSlot start, FullObjectSlot end) override {
    5029           0 :     handle_count_ += end - start;
    5030           0 :   }
    5031             : 
    5032             :  private:
    5033             :   ptrdiff_t handle_count_;
    5034             : };
    5035             : 
    5036             : 
    5037           0 : void Heap::CheckHandleCount() {
    5038             :   CheckHandleCountVisitor v;
    5039           0 :   isolate_->handle_scope_implementer()->Iterate(&v);
    5040           0 : }
    5041             : 
    5042       61161 : Address* Heap::store_buffer_top_address() {
    5043       61161 :   return store_buffer()->top_address();
    5044             : }
    5045             : 
    5046             : // static
    5047         112 : intptr_t Heap::store_buffer_mask_constant() {
    5048         112 :   return StoreBuffer::kStoreBufferMask;
    5049             : }
    5050             : 
    5051             : // static
    5052       61161 : Address Heap::store_buffer_overflow_function_address() {
    5053       61161 :   return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
    5054             : }
    5055             : 
    5056        5369 : void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
    5057             :   DCHECK(!IsLargeObject(object));
    5058             :   Page* page = Page::FromAddress(slot.address());
    5059      737778 :   if (!page->InYoungGeneration()) {
    5060             :     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5061             :     store_buffer()->DeleteEntry(slot.address());
    5062             :   }
    5063        5369 : }
    5064             : 
    5065             : #ifdef DEBUG
    5066             : void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
    5067             :   DCHECK(!IsLargeObject(object));
    5068             :   if (InYoungGeneration(object)) return;
    5069             :   Page* page = Page::FromAddress(slot.address());
    5070             :   DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5071             :   store_buffer()->MoveAllEntriesToRememberedSet();
    5072             :   CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
    5073             :   // Old to old slots are filtered with invalidated slots.
    5074             :   CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
    5075             :                 page->RegisteredObjectWithInvalidatedSlots(object));
    5076             : }
    5077             : #endif
    5078             : 
    5079      279970 : void Heap::ClearRecordedSlotRange(Address start, Address end) {
    5080             :   Page* page = Page::FromAddress(start);
    5081             :   DCHECK(!page->IsLargePage());
    5082     4436648 :   if (!page->InYoungGeneration()) {
    5083             :     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    5084             :     store_buffer()->DeleteEntry(start, end);
    5085             :   }
    5086      279970 : }
    5087             : 
    5088    27649792 : PagedSpace* PagedSpaces::next() {
    5089    27649792 :   switch (counter_++) {
    5090             :     case RO_SPACE:
    5091             :       // skip NEW_SPACE
    5092     5258239 :       counter_++;
    5093    22051923 :       return heap_->read_only_space();
    5094             :     case OLD_SPACE:
    5095    11195788 :       return heap_->old_space();
    5096             :     case CODE_SPACE:
    5097    11195792 :       return heap_->code_space();
    5098             :     case MAP_SPACE:
    5099    11195788 :       return heap_->map_space();
    5100             :     default:
    5101             :       return nullptr;
    5102             :   }
    5103             : }
    5104             : 
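PagedSpaces::next() falls through to nullptr once every paged space has been returned, so it is naturally driven by a simple for loop. A minimal usage sketch, assuming a one-argument PagedSpaces constructor (the class is declared in heap.h and its constructor is not shown in this file):

// Visits RO_SPACE, OLD_SPACE, CODE_SPACE and MAP_SPACE in turn; NEW_SPACE
// is skipped inside next() itself.
int CountPagedSpaces(Heap* heap) {
  PagedSpaces spaces(heap);  // assumed constructor signature
  int count = 0;
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    count++;
  }
  return count;
}
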
    5105      222968 : SpaceIterator::SpaceIterator(Heap* heap)
    5106      230553 :     : heap_(heap), current_space_(FIRST_SPACE - 1) {}
    5107             : 
    5108             : SpaceIterator::~SpaceIterator() = default;
    5109             : 
    5110     2006712 : bool SpaceIterator::has_next() {
    5111             :   // Iterate until no more spaces.
    5112     2067392 :   return current_space_ != LAST_SPACE;
    5113             : }
    5114             : 
    5115     1783744 : Space* SpaceIterator::next() {
    5116             :   DCHECK(has_next());
    5117     9060648 :   return heap_->space(++current_space_);
    5118             : }
    5119             : 
    5120             : 
    5121        1290 : class HeapObjectsFilter {
    5122             :  public:
    5123        1290 :   virtual ~HeapObjectsFilter() = default;
    5124             :   virtual bool SkipObject(HeapObject object) = 0;
    5125             : };
    5126             : 
    5127             : 
    5128             : class UnreachableObjectsFilter : public HeapObjectsFilter {
    5129             :  public:
    5130        3870 :   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
    5131        1290 :     MarkReachableObjects();
    5132        1290 :   }
    5133             : 
    5134        2580 :   ~UnreachableObjectsFilter() override {
    5135       12699 :     for (auto it : reachable_) {
    5136       20238 :       delete it.second;
    5137             :       it.second = nullptr;
    5138             :     }
    5139        2580 :   }
    5140             : 
    5141    10876858 :   bool SkipObject(HeapObject object) override {
    5142    10876858 :     if (object->IsFiller()) return true;
    5143    10876858 :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5144    10876858 :     if (reachable_.count(chunk) == 0) return true;
    5145    21753542 :     return reachable_[chunk]->count(object) == 0;
    5146             :   }
    5147             : 
    5148             :  private:
    5149    55397150 :   bool MarkAsReachable(HeapObject object) {
    5150    55397150 :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5151    55397150 :     if (reachable_.count(chunk) == 0) {
    5152       20238 :       reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
    5153             :     }
    5154   110794300 :     if (reachable_[chunk]->count(object)) return false;
    5155    10348881 :     reachable_[chunk]->insert(object);
    5156    10348881 :     return true;
    5157             :   }
    5158             : 
    5159        1290 :   class MarkingVisitor : public ObjectVisitor, public RootVisitor {
    5160             :    public:
    5161             :     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
    5162        1290 :         : filter_(filter) {}
    5163             : 
    5164    22205254 :     void VisitPointers(HeapObject host, ObjectSlot start,
    5165             :                        ObjectSlot end) override {
    5166    22205254 :       MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
    5167    22205254 :     }
    5168             : 
    5169     1192985 :     void VisitPointers(HeapObject host, MaybeObjectSlot start,
    5170             :                        MaybeObjectSlot end) final {
    5171     1192985 :       MarkPointers(start, end);
    5172     1192985 :     }
    5173             : 
    5174        9030 :     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    5175        9030 :       Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    5176             :       MarkHeapObject(target);
    5177        9030 :     }
    5178       40296 :     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    5179             :       MarkHeapObject(rinfo->target_object());
    5180       40296 :     }
    5181             : 
    5182     3988347 :     void VisitRootPointers(Root root, const char* description,
    5183             :                            FullObjectSlot start, FullObjectSlot end) override {
    5184             :       MarkPointersImpl(start, end);
    5185     3988347 :     }
    5186             : 
    5187        1290 :     void TransitiveClosure() {
    5188    10351461 :       while (!marking_stack_.empty()) {
    5189    10348881 :         HeapObject obj = marking_stack_.back();
    5190             :         marking_stack_.pop_back();
    5191    10348881 :         obj->Iterate(this);
    5192             :       }
    5193        1290 :     }
    5194             : 
    5195             :    private:
    5196    23398239 :     void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
    5197             :       MarkPointersImpl(start, end);
    5198    23398239 :     }
    5199             : 
    5200             :     template <typename TSlot>
    5201             :     V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
    5202             :       // Treat weak references as strong.
    5203    91958880 :       for (TSlot p = start; p < end; ++p) {
    5204    64572294 :         typename TSlot::TObject object = *p;
    5205    64572294 :         HeapObject heap_object;
    5206    64572294 :         if (object.GetHeapObject(&heap_object)) {
    5207             :           MarkHeapObject(heap_object);
    5208             :         }
    5209             :       }
    5210             :     }
    5211             : 
    5212             :     V8_INLINE void MarkHeapObject(HeapObject heap_object) {
    5213    55397150 :       if (filter_->MarkAsReachable(heap_object)) {
    5214    10348881 :         marking_stack_.push_back(heap_object);
    5215             :       }
    5216             :     }
    5217             : 
    5218             :     UnreachableObjectsFilter* filter_;
    5219             :     std::vector<HeapObject> marking_stack_;
    5220             :   };
    5221             : 
    5222             :   friend class MarkingVisitor;
    5223             : 
    5224        1290 :   void MarkReachableObjects() {
    5225             :     MarkingVisitor visitor(this);
    5226        1290 :     heap_->IterateRoots(&visitor, VISIT_ALL);
    5227        1290 :     visitor.TransitiveClosure();
    5228        1290 :   }
    5229             : 
    5230             :   Heap* heap_;
    5231             :   DisallowHeapAllocation no_allocation_;
    5232             :   std::unordered_map<MemoryChunk*,
    5233             :                      std::unordered_set<HeapObject, Object::Hasher>*>
    5234             :       reachable_;
    5235             : };
    5236             : 
    5237        7585 : HeapIterator::HeapIterator(Heap* heap,
    5238             :                            HeapIterator::HeapObjectsFiltering filtering)
    5239             :     : heap_(heap),
    5240             :       filtering_(filtering),
    5241             :       filter_(nullptr),
    5242             :       space_iterator_(nullptr),
    5243        7585 :       object_iterator_(nullptr) {
    5244             :   heap_->MakeHeapIterable();
    5245        7585 :   heap_->heap_iterator_start();
    5246             :   // Start the iteration.
    5247       15170 :   space_iterator_ = new SpaceIterator(heap_);
    5248        7585 :   switch (filtering_) {
    5249             :     case kFilterUnreachable:
    5250        1290 :       filter_ = new UnreachableObjectsFilter(heap_);
    5251        1290 :       break;
    5252             :     default:
    5253             :       break;
    5254             :   }
    5255       22755 :   object_iterator_ = space_iterator_->next()->GetObjectIterator();
    5256        7585 : }
    5257             : 
    5258             : 
    5259        7585 : HeapIterator::~HeapIterator() {
    5260        7585 :   heap_->heap_iterator_end();
    5261             : #ifdef DEBUG
    5262             :   // Assert that in filtering mode we have iterated through all
    5263             :   // objects. Otherwise, the heap will be left in an inconsistent state.
    5264             :   if (filtering_ != kNoFiltering) {
    5265             :     DCHECK_NULL(object_iterator_);
    5266             :   }
    5267             : #endif
    5268        7585 :   delete space_iterator_;
    5269        7585 :   delete filter_;
    5270        7585 : }
    5271             : 
    5272    86462752 : HeapObject HeapIterator::next() {
    5273    86462752 :   if (filter_ == nullptr) return NextObject();
    5274             : 
    5275    10350171 :   HeapObject obj = NextObject();
    5276    21228319 :   while (!obj.is_null() && (filter_->SkipObject(obj))) obj = NextObject();
    5277    10350171 :   return obj;
    5278             : }
    5279             : 
    5280    86990729 : HeapObject HeapIterator::NextObject() {
    5281             :   // No iterator means we are done.
    5282    86990729 :   if (object_iterator_.get() == nullptr) return HeapObject();
    5283             : 
    5284    86990729 :   HeapObject obj = object_iterator_.get()->Next();
    5285    86990729 :   if (!obj.is_null()) {
    5286             :     // If the current iterator has more objects we are fine.
    5287    86953917 :     return obj;
    5288             :   } else {
    5289             :     // Go through the spaces looking for one that has objects.
    5290      121360 :     while (space_iterator_->has_next()) {
    5291      106190 :       object_iterator_ = space_iterator_->next()->GetObjectIterator();
    5292       53095 :       obj = object_iterator_.get()->Next();
    5293       53095 :       if (!obj.is_null()) {
    5294       29227 :         return obj;
    5295             :       }
    5296             :     }
    5297             :   }
    5298             :   // Done with the last space.
    5299             :   object_iterator_.reset(nullptr);
    5300        7585 :   return HeapObject();
    5301             : }
    5302             : 
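HeapIterator is driven the same way CompactWeakArrayLists uses it above: call next() until it returns a null HeapObject. A minimal sketch that counts the objects surviving the unreachable-objects filter:

// With kFilterUnreachable, the constructor first marks the transitive
// closure from the roots, so next() only yields reachable objects.
int CountReachableObjects(Heap* heap) {
  HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
  int count = 0;
  for (HeapObject obj = iterator.next(); !obj.is_null();
       obj = iterator.next()) {
    count++;
  }
  return count;
}
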
    5303       97980 : void Heap::UpdateTotalGCTime(double duration) {
    5304       97980 :   if (FLAG_trace_gc_verbose) {
    5305           0 :     total_gc_time_ms_ += duration;
    5306             :   }
    5307       97980 : }
    5308             : 
    5309       74510 : void Heap::ExternalStringTable::CleanUpYoung() {
    5310             :   int last = 0;
    5311       74510 :   Isolate* isolate = heap_->isolate();
    5312      149494 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    5313       75004 :     Object o = young_strings_[i];
    5314         237 :     if (o->IsTheHole(isolate)) {
    5315         217 :       continue;
    5316             :     }
    5317             :     // The real external string is already in one of these vectors and was or
    5318             :     // will be processed. Re-processing it will add a duplicate to the vector.
    5319          20 :     if (o->IsThinString()) continue;
    5320             :     DCHECK(o->IsExternalString());
    5321          20 :     if (InYoungGeneration(o)) {
    5322          40 :       young_strings_[last++] = o;
    5323             :     } else {
    5324           0 :       old_strings_.push_back(o);
    5325             :     }
    5326             :   }
    5327       74510 :   young_strings_.resize(last);
    5328       74510 : }
    5329             : 
    5330       74510 : void Heap::ExternalStringTable::CleanUpAll() {
    5331       74510 :   CleanUpYoung();
    5332             :   int last = 0;
    5333       74510 :   Isolate* isolate = heap_->isolate();
    5334      370602 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    5335      405533 :     Object o = old_strings_[i];
    5336      110791 :     if (o->IsTheHole(isolate)) {
    5337        1350 :       continue;
    5338             :     }
    5339             :     // The real external string is already in one of these vectors and was or
    5340             :     // will be processed. Re-processing it will add a duplicate to the vector.
    5341      109441 :     if (o->IsThinString()) continue;
    5342             :     DCHECK(o->IsExternalString());
    5343             :     DCHECK(!InYoungGeneration(o));
    5344      218882 :     old_strings_[last++] = o;
    5345             :   }
    5346       74510 :   old_strings_.resize(last);
    5347             : #ifdef VERIFY_HEAP
    5348             :   if (FLAG_verify_heap) {
    5349             :     Verify();
    5350             :   }
    5351             : #endif
    5352       74510 : }
    5353             : 
    5354       61033 : void Heap::ExternalStringTable::TearDown() {
    5355      122316 :   for (size_t i = 0; i < young_strings_.size(); ++i) {
    5356       61283 :     Object o = young_strings_[i];
    5357             :     // Don't finalize thin strings.
    5358         134 :     if (o->IsThinString()) continue;
    5359         116 :     heap_->FinalizeExternalString(ExternalString::cast(o));
    5360             :   }
    5361             :   young_strings_.clear();
    5362      291268 :   for (size_t i = 0; i < old_strings_.size(); ++i) {
    5363      230234 :     Object o = old_strings_[i];
    5364             :     // Don't finalize thin strings.
    5365       84599 :     if (o->IsThinString()) continue;
    5366       84599 :     heap_->FinalizeExternalString(ExternalString::cast(o));
    5367             :   }
    5368             :   old_strings_.clear();
    5369       61034 : }
    5370             : 
    5371             : 
    5372      706788 : void Heap::RememberUnmappedPage(Address page, bool compacted) {
    5373             :   // Tag the page pointer to make it findable in the dump file.
    5374      706788 :   if (compacted) {
    5375       10447 :     page ^= 0xC1EAD & (Page::kPageSize - 1);  // Cleared.
    5376             :   } else {
    5377      696341 :     page ^= 0x1D1ED & (Page::kPageSize - 1);  // I died.
    5378             :   }
    5379      767837 :   remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
    5380      767837 :   remembered_unmapped_pages_index_++;
    5381      767837 :   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
    5382      706788 : }
    5383             : 
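Page base addresses are aligned to the page size, so the XOR above only flips bits within the page offset: in a crash dump the remembered value shows a recognizable hex-speak tag in its low bits ("Cleared" for compacted pages, "I died" otherwise) while the page-aligned base stays recoverable. A small self-contained check of that property, assuming a hypothetical 512 KB page size:

#include <cstdint>

constexpr uintptr_t kAssumedPageSize = 0x80000;  // 512 KB, i.e. 1 << 19
constexpr uintptr_t kExamplePage = 0x08080000;   // page-aligned address
constexpr uintptr_t kTagged =
    kExamplePage ^ (0xC1EAD & (kAssumedPageSize - 1));
// Only bits below the page size were flipped; the page base is unchanged.
static_assert((kTagged & ~(kAssumedPageSize - 1)) ==
                  (kExamplePage & ~(kAssumedPageSize - 1)),
              "tagging must leave the page-aligned base intact");
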
    5384     3366819 : void Heap::RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end) {
    5385     3366819 :   StrongRootsList* list = new StrongRootsList();
    5386     3366826 :   list->next = strong_roots_list_;
    5387     3366826 :   list->start = start;
    5388     3366826 :   list->end = end;
    5389     3366826 :   strong_roots_list_ = list;
    5390     3366826 : }
    5391             : 
    5392     3366809 : void Heap::UnregisterStrongRoots(FullObjectSlot start) {
    5393             :   StrongRootsList* prev = nullptr;
    5394     3366809 :   StrongRootsList* list = strong_roots_list_;
    5395    13505892 :   while (list != nullptr) {
    5396     6772273 :     StrongRootsList* next = list->next;
    5397     6772273 :     if (list->start == start) {
    5398     3366809 :       if (prev) {
    5399         773 :         prev->next = next;
    5400             :       } else {
    5401     3366036 :         strong_roots_list_ = next;
    5402             :       }
    5403     3366809 :       delete list;
    5404             :     } else {
    5405             :       prev = list;
    5406             :     }
    5407             :     list = next;
    5408             :   }
    5409     3366810 : }
    5410             : 
    5411          56 : void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
    5412          56 :   set_builtins_constants_table(cache);
    5413          56 : }
    5414             : 
    5415          56 : void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
    5416             :   DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code->builtin_index());
    5417          56 :   set_interpreter_entry_trampoline_for_profiling(code);
    5418          56 : }
    5419             : 
    5420         207 : void Heap::AddDirtyJSFinalizationGroup(
    5421             :     JSFinalizationGroup finalization_group,
    5422             :     std::function<void(HeapObject object, ObjectSlot slot, Object target)>
    5423             :         gc_notify_updated_slot) {
    5424             :   DCHECK(dirty_js_finalization_groups()->IsUndefined(isolate()) ||
    5425             :          dirty_js_finalization_groups()->IsJSFinalizationGroup());
    5426             :   DCHECK(finalization_group->next()->IsUndefined(isolate()));
    5427             :   DCHECK(!finalization_group->scheduled_for_cleanup());
    5428         207 :   finalization_group->set_scheduled_for_cleanup(true);
    5429         207 :   finalization_group->set_next(dirty_js_finalization_groups());
    5430             :   gc_notify_updated_slot(
    5431             :       finalization_group,
    5432             :       finalization_group.RawField(JSFinalizationGroup::kNextOffset),
    5433         414 :       dirty_js_finalization_groups());
    5434         207 :   set_dirty_js_finalization_groups(finalization_group);
    5435             :   // Roots are rescanned after objects are moved, so no need to record a slot
    5436             :   // for the root pointing to the first JSFinalizationGroup.
    5437         207 : }
    5438             : 
    5439         172 : void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
    5440             :   DCHECK(FLAG_harmony_weak_refs);
    5441             :   DCHECK(weak_refs_keep_during_job()->IsUndefined() ||
    5442             :          weak_refs_keep_during_job()->IsOrderedHashSet());
    5443             :   Handle<OrderedHashSet> table;
    5444         344 :   if (weak_refs_keep_during_job()->IsUndefined(isolate())) {
    5445          82 :     table = isolate()->factory()->NewOrderedHashSet();
    5446             :   } else {
    5447             :     table =
    5448         180 :         handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
    5449             :   }
    5450         172 :   table = OrderedHashSet::Add(isolate(), table, target);
    5451         172 :   set_weak_refs_keep_during_job(*table);
    5452         172 : }
    5453             : 
    5454      699474 : void Heap::ClearKeepDuringJobSet() {
    5455      699474 :   set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
    5456      699474 : }
    5457             : 
    5458           0 : size_t Heap::NumberOfTrackedHeapObjectTypes() {
    5459           0 :   return ObjectStats::OBJECT_STATS_COUNT;
    5460             : }
    5461             : 
    5462             : 
    5463           0 : size_t Heap::ObjectCountAtLastGC(size_t index) {
    5464           0 :   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    5465             :     return 0;
    5466           0 :   return live_object_stats_->object_count_last_gc(index);
    5467             : }
    5468             : 
    5469             : 
    5470           0 : size_t Heap::ObjectSizeAtLastGC(size_t index) {
    5471           0 :   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    5472             :     return 0;
    5473           0 :   return live_object_stats_->object_size_last_gc(index);
    5474             : }
    5475             : 
    5476             : 
    5477           0 : bool Heap::GetObjectTypeName(size_t index, const char** object_type,
    5478             :                              const char** object_sub_type) {
    5479           0 :   if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
    5480             : 
    5481           0 :   switch (static_cast<int>(index)) {
    5482             : #define COMPARE_AND_RETURN_NAME(name) \
    5483             :   case name:                          \
    5484             :     *object_type = #name;             \
    5485             :     *object_sub_type = "";            \
    5486             :     return true;
    5487           0 :     INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
    5488             : #undef COMPARE_AND_RETURN_NAME
    5489             : 
    5490             : #define COMPARE_AND_RETURN_NAME(name)                       \
    5491             :   case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
    5492             :     *object_type = #name;                                   \
    5493             :     *object_sub_type = "";                                  \
    5494             :     return true;
    5495           0 :     VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
    5496             : #undef COMPARE_AND_RETURN_NAME
    5497             :   }
    5498             :   return false;
    5499             : }
    5500             : 
    5501         246 : size_t Heap::NumberOfNativeContexts() {
    5502             :   int result = 0;
    5503         246 :   Object context = native_contexts_list();
    5504        1560 :   while (!context->IsUndefined(isolate())) {
    5505        1068 :     ++result;
    5506        1068 :     Context native_context = Context::cast(context);
    5507        1068 :     context = native_context->next_context_link();
    5508             :   }
    5509         246 :   return result;
    5510             : }
    5511             : 
    5512         246 : size_t Heap::NumberOfDetachedContexts() {
    5513             :   // The detached_contexts() array has two entries per detached context.
    5514         492 :   return detached_contexts()->length() / 2;
    5515             : }
    5516             : 
    5517         159 : const char* AllocationSpaceName(AllocationSpace space) {
    5518         159 :   switch (space) {
    5519             :     case NEW_SPACE:
    5520             :       return "NEW_SPACE";
    5521             :     case OLD_SPACE:
    5522           1 :       return "OLD_SPACE";
    5523             :     case CODE_SPACE:
    5524           0 :       return "CODE_SPACE";
    5525             :     case MAP_SPACE:
    5526           2 :       return "MAP_SPACE";
    5527             :     case LO_SPACE:
    5528           0 :       return "LO_SPACE";
    5529             :     case NEW_LO_SPACE:
    5530           0 :       return "NEW_LO_SPACE";
    5531             :     case RO_SPACE:
    5532         156 :       return "RO_SPACE";
    5533             :     default:
    5534           0 :       UNREACHABLE();
    5535             :   }
    5536             :   return nullptr;
    5537             : }
    5538             : 
    5539           0 : void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
    5540             :                                           ObjectSlot end) {
    5541           0 :   VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
    5542           0 : }
    5543             : 
    5544           0 : void VerifyPointersVisitor::VisitPointers(HeapObject host,
    5545             :                                           MaybeObjectSlot start,
    5546             :                                           MaybeObjectSlot end) {
    5547           0 :   VerifyPointers(host, start, end);
    5548           0 : }
    5549             : 
    5550           0 : void VerifyPointersVisitor::VisitRootPointers(Root root,
    5551             :                                               const char* description,
    5552             :                                               FullObjectSlot start,
    5553             :                                               FullObjectSlot end) {
    5554             :   VerifyPointersImpl(start, end);
    5555           0 : }
    5556             : 
    5557             : void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
    5558           0 :   CHECK(heap_->Contains(heap_object));
    5559           0 :   CHECK(heap_object->map()->IsMap());
    5560             : }
    5561             : 
    5562             : template <typename TSlot>
    5563             : void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
    5564           0 :   for (TSlot slot = start; slot < end; ++slot) {
    5565           0 :     typename TSlot::TObject object = *slot;
    5566           0 :     HeapObject heap_object;
    5567           0 :     if (object.GetHeapObject(&heap_object)) {
    5568             :       VerifyHeapObjectImpl(heap_object);
    5569             :     } else {
    5570           0 :       CHECK(object->IsSmi() || object->IsCleared());
    5571             :     }
    5572             :   }
    5573             : }
    5574             : 
    5575           0 : void VerifyPointersVisitor::VerifyPointers(HeapObject host,
    5576             :                                            MaybeObjectSlot start,
    5577             :                                            MaybeObjectSlot end) {
    5578             :   // If this DCHECK fires then you probably added a pointer field
    5579             :   // to one of the objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
    5580             :   // this by moving that object to POINTER_VISITOR_ID_LIST.
    5581             :   DCHECK_EQ(ObjectFields::kMaybePointers,
    5582             :             Map::ObjectFieldsFrom(host->map()->visitor_id()));
    5583             :   VerifyPointersImpl(start, end);
    5584           0 : }
    5585             : 
    5586           0 : void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
    5587           0 :   Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    5588             :   VerifyHeapObjectImpl(target);
    5589           0 : }
    5590             : 
    5591           0 : void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
    5592             :   VerifyHeapObjectImpl(rinfo->target_object());
    5593           0 : }
    5594             : 
    5595           0 : void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
    5596             :                                           FullObjectSlot start,
    5597             :                                           FullObjectSlot end) {
    5598           0 :   for (FullObjectSlot current = start; current < end; ++current) {
    5599           0 :     CHECK((*current)->IsSmi());
    5600             :   }
    5601           0 : }
    5602             : 
    5603           0 : bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
    5604             :   // Object migration is governed by the following rules:
    5605             :   //
    5606             :   // 1) Objects in new-space can be migrated to the old space
    5607             :   //    that matches their target space or they stay in new-space.
    5608             :   // 2) Objects in old-space stay in the same space when migrating.
    5609             :   // 3) Fillers (two or more words) can migrate due to left-trimming of
    5610             :   //    fixed arrays in new-space or old space.
    5611             :   // 4) Fillers (one word) can never migrate, they are skipped by
    5612             :   // 4) Fillers (one word) can never migrate; they are skipped by
    5613             :   //    incremental marking explicitly to prevent an invalid pattern.
    5614             :   // Since this function is used for debugging only, we do not place
    5615             :   // asserts here, but check everything explicitly.
    5616           0 :   if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
    5617             :   InstanceType type = obj->map()->instance_type();
    5618             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    5619           0 :   AllocationSpace src = chunk->owner()->identity();
    5620           0 :   switch (src) {
    5621             :     case NEW_SPACE:
    5622           0 :       return dst == NEW_SPACE || dst == OLD_SPACE;
    5623             :     case OLD_SPACE:
    5624           0 :       return dst == OLD_SPACE;
    5625             :     case CODE_SPACE:
    5626           0 :       return dst == CODE_SPACE && type == CODE_TYPE;
    5627             :     case MAP_SPACE:
    5628             :     case LO_SPACE:
    5629             :     case CODE_LO_SPACE:
    5630             :     case NEW_LO_SPACE:
    5631             :     case RO_SPACE:
    5632             :       return false;
    5633             :   }
    5634           0 :   UNREACHABLE();
    5635             : }
    5636             : 
    5637           0 : void Heap::CreateObjectStats() {
    5638           0 :   if (V8_LIKELY(FLAG_gc_stats == 0)) return;
    5639           0 :   if (!live_object_stats_) {
    5640           0 :     live_object_stats_.reset(new ObjectStats(this));
    5641             :   }
    5642           0 :   if (!dead_object_stats_) {
    5643           0 :     dead_object_stats_.reset(new ObjectStats(this));
    5644             :   }
    5645             : }
    5646             : 
    5647    22368348 : void AllocationObserver::AllocationStep(int bytes_allocated,
    5648             :                                         Address soon_object, size_t size) {
    5649             :   DCHECK_GE(bytes_allocated, 0);
    5650    22368348 :   bytes_to_next_step_ -= bytes_allocated;
    5651    22368348 :   if (bytes_to_next_step_ <= 0) {
    5652      210275 :     Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
    5653      210275 :     step_size_ = GetNextStepSize();
    5654      210275 :     bytes_to_next_step_ = step_size_;
    5655             :   }
    5656             :   DCHECK_GE(bytes_to_next_step_, 0);
    5657    22368348 : }
    5658             : 
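AllocationStep above counts allocated bytes down from step_size_ and, once the counter goes non-positive, reports the total bytes allocated since the last step (step_size_ - bytes_to_next_step_) before re-arming itself. A minimal observer sketch, assuming AllocationObserver's constructor takes the initial step size, Step() is the virtual hook, and GetNextStepSize() defaults to returning that fixed step size (as with the stress observers torn down at the top of this section):

// Counts how often the allocation step fires. With a step size of 512 and
// three 200-byte allocations, bytes_to_next_step_ reaches -88 on the third
// allocation and Step(600, ...) reports all bytes since the previous step.
class CountingObserver : public AllocationObserver {
 public:
  explicit CountingObserver(intptr_t step_size)
      : AllocationObserver(step_size) {}

  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    steps_++;
  }

  int steps() const { return steps_; }

 private:
  int steps_ = 0;
};
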
    5659             : namespace {
    5660             : 
    5661     2294871 : Map GcSafeMapOfCodeSpaceObject(HeapObject object) {
    5662             :   MapWord map_word = object->map_word();
    5663             :   return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
    5664     4589742 :                                         : map_word.ToMap();
    5665             : }
    5666             : 
    5667     2294871 : int GcSafeSizeOfCodeSpaceObject(HeapObject object) {
    5668     2294871 :   return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
    5669             : }
    5670             : 
    5671             : Code GcSafeCastToCode(Heap* heap, HeapObject object, Address inner_pointer) {
    5672             :   Code code = Code::unchecked_cast(object);
    5673             :   DCHECK(!code.is_null());
    5674             :   DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
    5675             :   return code;
    5676             : }
    5677             : 
    5678             : }  // namespace
    5679             : 
    5680           0 : bool Heap::GcSafeCodeContains(Code code, Address addr) {
    5681           0 :   Map map = GcSafeMapOfCodeSpaceObject(code);
    5682             :   DCHECK(map == ReadOnlyRoots(this).code_map());
    5683           0 :   if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
    5684             :   Address start = code->address();
    5685           0 :   Address end = code->address() + code->SizeFromMap(map);
    5686           0 :   return start <= addr && addr < end;
    5687             : }
    5688             : 
    5689     2350962 : Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
    5690     1291494 :   Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
    5691     1291496 :   if (!code.is_null()) return code;
    5692             : 
    5693             :   // Check if the inner pointer points into a large object chunk.
    5694      529740 :   LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
    5695      529740 :   if (large_page != nullptr) {
    5696             :     return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
    5697             :   }
    5698             : 
    5699             :   DCHECK(code_space()->Contains(inner_pointer));
    5700             : 
    5701             :   // Iterate through the page until we reach the end or find an object starting
    5702             :   // after the inner pointer.
    5703             :   Page* page = Page::FromAddress(inner_pointer);
    5704             :   DCHECK_EQ(page->owner(), code_space());
    5705      529730 :   mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);
    5706             : 
    5707      529728 :   Address addr = page->skip_list()->StartFor(inner_pointer);
    5708             :   Address top = code_space()->top();
    5709             :   Address limit = code_space()->limit();
    5710             : 
    5711             :   while (true) {
    5712     2309206 :     if (addr == top && addr != limit) {
    5713             :       addr = limit;
    5714             :       continue;
    5715             :     }
    5716             : 
    5717             :     HeapObject obj = HeapObject::FromAddress(addr);
    5718     2294871 :     int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
    5719     2294873 :     Address next_addr = addr + obj_size;
    5720     2294873 :     if (next_addr > inner_pointer) {
    5721             :       return GcSafeCastToCode(this, obj, inner_pointer);
    5722             :     }
    5723             :     addr = next_addr;
    5724             :   }
    5725             : }
    5726             : 
    5727          40 : void Heap::WriteBarrierForCodeSlow(Code code) {
    5728          85 :   for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
    5729           5 :        !it.done(); it.next()) {
    5730          10 :     GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    5731          10 :     MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    5732             :   }
    5733          40 : }
    5734             : 
    5735           0 : void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
    5736             :                                    HeapObject value) {
    5737             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5738             :   heap->store_buffer()->InsertEntry(slot);
    5739           0 : }
    5740             : 
    5741         984 : void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
    5742             :                                               int offset, int length) {
    5743     1558404 :   for (int i = 0; i < length; i++) {
    5744     3114840 :     if (!InYoungGeneration(array->get(offset + i))) continue;
    5745             :     heap->store_buffer()->InsertEntry(
    5746             :         array->RawFieldOfElementAt(offset + i).address());
    5747             :   }
    5748         984 : }
    5749             : 
    5750      435020 : void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
    5751             :                                           HeapObject object) {
    5752             :   DCHECK(InYoungGeneration(object));
    5753             :   Page* source_page = Page::FromHeapObject(host);
    5754             :   RelocInfo::Mode rmode = rinfo->rmode();
    5755             :   Address addr = rinfo->pc();
    5756             :   SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
    5757      217510 :   if (rinfo->IsInConstantPool()) {
    5758             :     addr = rinfo->constant_pool_entry_address();
    5759             :     if (RelocInfo::IsCodeTargetMode(rmode)) {
    5760             :       slot_type = CODE_ENTRY_SLOT;
    5761             :     } else {
    5762             :       DCHECK(RelocInfo::IsEmbeddedObject(rmode));
    5763             :       slot_type = OBJECT_SLOT;
    5764             :     }
    5765             :   }
    5766      435020 :   uintptr_t offset = addr - source_page->address();
    5767             :   DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
    5768             :   RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
    5769      217510 :                                          static_cast<uint32_t>(offset));
    5770      217510 : }
    5771             : 
    5772           0 : void Heap::MarkingBarrierSlow(HeapObject object, Address slot,
    5773             :                               HeapObject value) {
    5774             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5775             :   heap->incremental_marking()->RecordWriteSlow(object, HeapObjectSlot(slot),
    5776   285501829 :                                                value);
    5777           0 : }
    5778             : 
    5779         535 : void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
    5780             :   IncrementalMarking::MarkingState* marking_state =
    5781             :       heap->incremental_marking()->marking_state();
    5782         535 :   if (!marking_state->IsBlack(object)) {
    5783             :     marking_state->WhiteToGrey(object);
    5784             :     marking_state->GreyToBlack(object);
    5785             :   }
    5786         535 :   heap->incremental_marking()->RevisitObject(object);
    5787         535 : }
    5788             : 
    5789           0 : void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
    5790             :                                      HeapObject object) {
    5791             :   Heap* heap = Heap::FromWritableHeapObject(host);
    5792             :   DCHECK(heap->incremental_marking()->IsMarking());
    5793      291888 :   heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
    5794           0 : }
    5795             : 
    5796     9252427 : void Heap::MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
    5797             :                                                 HeapObject raw_descriptor_array,
    5798             :                                                 int number_of_own_descriptors) {
    5799             :   DCHECK(heap->incremental_marking()->IsMarking());
    5800             :   DescriptorArray descriptor_array =
    5801             :       DescriptorArray::cast(raw_descriptor_array);
    5802             :   int16_t raw_marked = descriptor_array->raw_number_of_marked_descriptors();
    5803     9252428 :   if (NumberOfMarkedDescriptors::decode(heap->mark_compact_collector()->epoch(),
    5804    18504856 :                                         raw_marked) <
    5805             :       number_of_own_descriptors) {
    5806             :     heap->incremental_marking()->VisitDescriptors(host, descriptor_array,
    5807     3462421 :                                                   number_of_own_descriptors);
    5808             :   }
    5809     9252429 : }
    5810             : 
    5811           0 : bool Heap::PageFlagsAreConsistent(HeapObject object) {
    5812             :   Heap* heap = Heap::FromWritableHeapObject(object);
    5813           0 :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    5814             :   heap_internals::MemoryChunk* slim_chunk =
    5815             :       heap_internals::MemoryChunk::FromHeapObject(object);
    5816             : 
    5817             :   const bool generation_consistency =
    5818           0 :       chunk->owner()->identity() != NEW_SPACE ||
    5819           0 :       (chunk->InYoungGeneration() && slim_chunk->InYoungGeneration());
    5820             :   const bool marking_consistency =
    5821           0 :       !heap->incremental_marking()->IsMarking() ||
    5822           0 :       (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
    5823             :        slim_chunk->IsMarking());
    5824             : 
    5825           0 :   return generation_consistency && marking_consistency;
    5826             : }
    5827             : 
    5828             : static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
    5829             :                   heap_internals::MemoryChunk::kMarkingBit,
    5830             :               "Incremental marking flag inconsistent");
    5831             : static_assert(MemoryChunk::Flag::FROM_PAGE ==
    5832             :                   heap_internals::MemoryChunk::kFromPageBit,
    5833             :               "From page flag inconsistent");
    5834             : static_assert(MemoryChunk::Flag::TO_PAGE ==
    5835             :                   heap_internals::MemoryChunk::kToPageBit,
    5836             :               "To page flag inconsistent");
    5837             : static_assert(MemoryChunk::kFlagsOffset ==
    5838             :                   heap_internals::MemoryChunk::kFlagsOffset,
    5839             :               "Flag offset inconsistent");
    5840             : static_assert(MemoryChunk::kHeapOffset ==
    5841             :                   heap_internals::MemoryChunk::kHeapOffset,
    5842             :               "Heap offset inconsistent");
    5843             : static_assert(MemoryChunk::kOwnerOffset ==
    5844             :                   heap_internals::MemoryChunk::kOwnerOffset,
    5845             :               "Owner offset inconsistent");
    5846             : 
    5847           5 : void Heap::SetEmbedderStackStateForNextFinalizaton(
    5848             :     EmbedderHeapTracer::EmbedderStackState stack_state) {
    5849             :   local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
    5850           5 :       stack_state);
    5851           5 : }
    5852             : 
    5853             : #ifdef DEBUG
    5854             : void Heap::IncrementObjectCounters() {
    5855             :   isolate_->counters()->objs_since_last_full()->Increment();
    5856             :   isolate_->counters()->objs_since_last_young()->Increment();
    5857             : }
    5858             : #endif  // DEBUG
    5859             : 
    5860             : }  // namespace internal
    5861      178779 : }  // namespace v8

Generated by: LCOV version 1.10