LCOV - code coverage report
Current view: top level - src/heap - mark-compact.cc
Test: app.info       Lines:     1043 of 1513 hit (68.9 %)
Date: 2019-03-21     Functions:  158 of  299 hit (52.8 %)

          Line data    Source code
       1             : // Copyright 2012 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/mark-compact.h"
       6             : 
       7             : #include <unordered_map>
       8             : 
       9             : #include "src/base/utils/random-number-generator.h"
      10             : #include "src/cancelable-task.h"
      11             : #include "src/compilation-cache.h"
      12             : #include "src/deoptimizer.h"
      13             : #include "src/execution.h"
      14             : #include "src/frames-inl.h"
      15             : #include "src/global-handles.h"
      16             : #include "src/heap/array-buffer-collector.h"
      17             : #include "src/heap/array-buffer-tracker-inl.h"
      18             : #include "src/heap/gc-tracer.h"
      19             : #include "src/heap/incremental-marking-inl.h"
      20             : #include "src/heap/invalidated-slots-inl.h"
      21             : #include "src/heap/item-parallel-job.h"
      22             : #include "src/heap/local-allocator-inl.h"
      23             : #include "src/heap/mark-compact-inl.h"
      24             : #include "src/heap/object-stats.h"
      25             : #include "src/heap/objects-visiting-inl.h"
      26             : #include "src/heap/spaces-inl.h"
      27             : #include "src/heap/sweeper.h"
      28             : #include "src/heap/worklist.h"
      29             : #include "src/ic/stub-cache.h"
      30             : #include "src/objects/embedder-data-array-inl.h"
      31             : #include "src/objects/foreign.h"
      32             : #include "src/objects/hash-table-inl.h"
      33             : #include "src/objects/js-objects-inl.h"
      34             : #include "src/objects/maybe-object.h"
      35             : #include "src/objects/slots-inl.h"
      36             : #include "src/transitions-inl.h"
      37             : #include "src/utils-inl.h"
      38             : #include "src/v8.h"
      39             : #include "src/vm-state-inl.h"
      40             : 
      41             : namespace v8 {
      42             : namespace internal {
      43             : 
      44             : const char* Marking::kWhiteBitPattern = "00";
      45             : const char* Marking::kBlackBitPattern = "11";
      46             : const char* Marking::kGreyBitPattern = "10";
      47             : const char* Marking::kImpossibleBitPattern = "01";
      48             : 
       49             : // The following has to hold so that {MarkingState::MarkBitFrom} does not
       50             : // produce the invalid {kImpossibleBitPattern} by overlapping mark bits.
      51             : STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
      52             : 
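The assert above is what keeps the two-bit color encodings of adjacent objects
from overlapping: the bitmap dedicates one mark bit per tagged word, and an
object's color is read from the two consecutive bits at its start address. If
single-word objects were allowed, the second color bit of one object would
coincide with the first color bit of its successor, and the pair could be read
back as the impossible "01". A minimal standalone sketch of the index
arithmetic (hypothetical constants, not V8's API):

    #include <cstdint>

    // One mark bit per 8-byte tagged word (64-bit layout assumed here).
    constexpr int kTaggedSizeLog2 = 3;

    // Index of the mark bit covering an address within its page.
    constexpr uint32_t AddressToMarkbitIndex(uintptr_t addr,
                                             uintptr_t page_start) {
      return static_cast<uint32_t>((addr - page_start) >> kTaggedSizeLog2);
    }

    // An object at word 2 owns bits 2 and 3 for its color; with a minimum
    // object size of two tagged words, bit 3 can never also be the first
    // color bit of a neighboring object.
    static_assert(AddressToMarkbitIndex(16, 0) == 2, "one bit per word");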
      53             : // =============================================================================
      54             : // Verifiers
      55             : // =============================================================================
      56             : 
      57             : #ifdef VERIFY_HEAP
      58             : namespace {
      59             : 
      60             : class MarkingVerifier : public ObjectVisitor, public RootVisitor {
      61             :  public:
      62             :   virtual void Run() = 0;
      63             : 
      64             :  protected:
      65             :   explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
      66             : 
      67             :   virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      68             :       const MemoryChunk* chunk) = 0;
      69             : 
      70             :   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
      71             :   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
      72             :   virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
      73             : 
      74             :   virtual bool IsMarked(HeapObject object) = 0;
      75             : 
      76             :   virtual bool IsBlackOrGrey(HeapObject object) = 0;
      77             : 
      78             :   void VisitPointers(HeapObject host, ObjectSlot start,
      79             :                      ObjectSlot end) override {
      80             :     VerifyPointers(start, end);
      81             :   }
      82             : 
      83             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
      84             :                      MaybeObjectSlot end) override {
      85             :     VerifyPointers(start, end);
      86             :   }
      87             : 
      88             :   void VisitRootPointers(Root root, const char* description,
      89             :                          FullObjectSlot start, FullObjectSlot end) override {
      90             :     VerifyRootPointers(start, end);
      91             :   }
      92             : 
      93             :   void VerifyRoots(VisitMode mode);
      94             :   void VerifyMarkingOnPage(const Page* page, Address start, Address end);
      95             :   void VerifyMarking(NewSpace* new_space);
      96             :   void VerifyMarking(PagedSpace* paged_space);
      97             :   void VerifyMarking(LargeObjectSpace* lo_space);
      98             : 
      99             :   Heap* heap_;
     100             : };
     101             : 
     102             : void MarkingVerifier::VerifyRoots(VisitMode mode) {
     103             :   heap_->IterateStrongRoots(this, mode);
     104             : }
     105             : 
     106             : void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
     107             :                                           Address end) {
     108             :   HeapObject object;
     109             :   Address next_object_must_be_here_or_later = start;
     110             :   for (Address current = start; current < end;) {
     111             :     object = HeapObject::FromAddress(current);
      112             :     // One-word fillers at the end of a black area can be grey.
     113             :     if (IsBlackOrGrey(object) &&
     114             :         object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
     115             :       CHECK(IsMarked(object));
     116             :       CHECK(current >= next_object_must_be_here_or_later);
     117             :       object->Iterate(this);
     118             :       next_object_must_be_here_or_later = current + object->Size();
      119             :       // The object is either part of a black area created by black allocation
      120             :       // or a regular black object.
     121             :       CHECK(
     122             :           bitmap(page)->AllBitsSetInRange(
     123             :               page->AddressToMarkbitIndex(current),
     124             :               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
     125             :           bitmap(page)->AllBitsClearInRange(
     126             :               page->AddressToMarkbitIndex(current + kTaggedSize * 2),
     127             :               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
     128             :       current = next_object_must_be_here_or_later;
     129             :     } else {
     130             :       current += kTaggedSize;
     131             :     }
     132             :   }
     133             : }
     134             : 
     135             : void MarkingVerifier::VerifyMarking(NewSpace* space) {
     136             :   Address end = space->top();
      137             :   // The bottom position is at the start of its page. This allows us to use
      138             :   // page->area_start() as the start of the range on all pages.
     139             :   CHECK_EQ(space->first_allocatable_address(),
     140             :            space->first_page()->area_start());
     141             : 
     142             :   PageRange range(space->first_allocatable_address(), end);
     143             :   for (auto it = range.begin(); it != range.end();) {
     144             :     Page* page = *(it++);
     145             :     Address limit = it != range.end() ? page->area_end() : end;
     146             :     CHECK(limit == end || !page->Contains(end));
     147             :     VerifyMarkingOnPage(page, page->area_start(), limit);
     148             :   }
     149             : }
     150             : 
     151             : void MarkingVerifier::VerifyMarking(PagedSpace* space) {
     152             :   for (Page* p : *space) {
     153             :     VerifyMarkingOnPage(p, p->area_start(), p->area_end());
     154             :   }
     155             : }
     156             : 
     157             : void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
     158             :   LargeObjectIterator it(lo_space);
     159             :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
     160             :     if (IsBlackOrGrey(obj)) {
     161             :       obj->Iterate(this);
     162             :     }
     163             :   }
     164             : }
     165             : 
     166             : class FullMarkingVerifier : public MarkingVerifier {
     167             :  public:
     168             :   explicit FullMarkingVerifier(Heap* heap)
     169             :       : MarkingVerifier(heap),
     170             :         marking_state_(
     171             :             heap->mark_compact_collector()->non_atomic_marking_state()) {}
     172             : 
     173             :   void Run() override {
     174             :     VerifyRoots(VISIT_ONLY_STRONG);
     175             :     VerifyMarking(heap_->new_space());
     176             :     VerifyMarking(heap_->new_lo_space());
     177             :     VerifyMarking(heap_->old_space());
     178             :     VerifyMarking(heap_->code_space());
     179             :     VerifyMarking(heap_->map_space());
     180             :     VerifyMarking(heap_->lo_space());
     181             :     VerifyMarking(heap_->code_lo_space());
     182             :   }
     183             : 
     184             :  protected:
     185             :   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
     186             :       const MemoryChunk* chunk) override {
     187             :     return marking_state_->bitmap(chunk);
     188             :   }
     189             : 
     190             :   bool IsMarked(HeapObject object) override {
     191             :     return marking_state_->IsBlack(object);
     192             :   }
     193             : 
     194             :   bool IsBlackOrGrey(HeapObject object) override {
     195             :     return marking_state_->IsBlackOrGrey(object);
     196             :   }
     197             : 
     198             :   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
     199             :     VerifyPointersImpl(start, end);
     200             :   }
     201             : 
     202             :   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
     203             :     VerifyPointersImpl(start, end);
     204             :   }
     205             : 
     206             :   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
     207             :     VerifyPointersImpl(start, end);
     208             :   }
     209             : 
     210             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
     211             :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     212             :     VerifyHeapObjectImpl(target);
     213             :   }
     214             : 
     215             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
     216             :     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     217             :     if (!host->IsWeakObject(rinfo->target_object())) {
     218             :       HeapObject object = rinfo->target_object();
     219             :       VerifyHeapObjectImpl(object);
     220             :     }
     221             :   }
     222             : 
     223             :  private:
     224             :   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
     225             :     CHECK(marking_state_->IsBlackOrGrey(heap_object));
     226             :   }
     227             : 
     228             :   template <typename TSlot>
     229             :   V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
     230             :     for (TSlot slot = start; slot < end; ++slot) {
     231             :       typename TSlot::TObject object = *slot;
     232             :       HeapObject heap_object;
     233             :       if (object.GetHeapObjectIfStrong(&heap_object)) {
     234             :         VerifyHeapObjectImpl(heap_object);
     235             :       }
     236             :     }
     237             :   }
     238             : 
     239             :   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
     240             : };
     241             : 
     242             : class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
     243             :  public:
     244             :   virtual void Run() = 0;
     245             : 
     246             :   void VisitPointers(HeapObject host, ObjectSlot start,
     247             :                      ObjectSlot end) override {
     248             :     VerifyPointers(start, end);
     249             :   }
     250             : 
     251             :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
     252             :                      MaybeObjectSlot end) override {
     253             :     VerifyPointers(start, end);
     254             :   }
     255             : 
     256             :   void VisitRootPointers(Root root, const char* description,
     257             :                          FullObjectSlot start, FullObjectSlot end) override {
     258             :     VerifyRootPointers(start, end);
     259             :   }
     260             : 
     261             :  protected:
     262             :   explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
     263             : 
     264             :   inline Heap* heap() { return heap_; }
     265             : 
     266             :   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
     267             :   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
     268             :   virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
     269             : 
     270             :   void VerifyRoots(VisitMode mode);
     271             :   void VerifyEvacuationOnPage(Address start, Address end);
     272             :   void VerifyEvacuation(NewSpace* new_space);
     273             :   void VerifyEvacuation(PagedSpace* paged_space);
     274             : 
     275             :   Heap* heap_;
     276             : };
     277             : 
     278             : void EvacuationVerifier::VerifyRoots(VisitMode mode) {
     279             :   heap_->IterateStrongRoots(this, mode);
     280             : }
     281             : 
     282             : void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
     283             :   Address current = start;
     284             :   while (current < end) {
     285             :     HeapObject object = HeapObject::FromAddress(current);
     286             :     if (!object->IsFiller()) object->Iterate(this);
     287             :     current += object->Size();
     288             :   }
     289             : }
     290             : 
     291             : void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
     292             :   PageRange range(space->first_allocatable_address(), space->top());
     293             :   for (auto it = range.begin(); it != range.end();) {
     294             :     Page* page = *(it++);
     295             :     Address current = page->area_start();
     296             :     Address limit = it != range.end() ? page->area_end() : space->top();
     297             :     CHECK(limit == space->top() || !page->Contains(space->top()));
     298             :     VerifyEvacuationOnPage(current, limit);
     299             :   }
     300             : }
     301             : 
     302             : void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
     303             :   for (Page* p : *space) {
     304             :     if (p->IsEvacuationCandidate()) continue;
     305             :     if (p->Contains(space->top())) {
     306             :       CodePageMemoryModificationScope memory_modification_scope(p);
     307             :       heap_->CreateFillerObjectAt(
     308             :           space->top(), static_cast<int>(space->limit() - space->top()),
     309             :           ClearRecordedSlots::kNo);
     310             :     }
     311             :     VerifyEvacuationOnPage(p->area_start(), p->area_end());
     312             :   }
     313             : }
     314             : 
     315             : class FullEvacuationVerifier : public EvacuationVerifier {
     316             :  public:
     317             :   explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
     318             : 
     319             :   void Run() override {
     320             :     VerifyRoots(VISIT_ALL);
     321             :     VerifyEvacuation(heap_->new_space());
     322             :     VerifyEvacuation(heap_->old_space());
     323             :     VerifyEvacuation(heap_->code_space());
     324             :     VerifyEvacuation(heap_->map_space());
     325             :   }
     326             : 
     327             :  protected:
     328             :   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
     329             :     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
     330             :                   Heap::InToPage(heap_object));
     331             :     CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
     332             :   }
     333             : 
     334             :   template <typename TSlot>
     335             :   void VerifyPointersImpl(TSlot start, TSlot end) {
     336             :     for (TSlot current = start; current < end; ++current) {
     337             :       typename TSlot::TObject object = *current;
     338             :       HeapObject heap_object;
     339             :       if (object.GetHeapObjectIfStrong(&heap_object)) {
     340             :         VerifyHeapObjectImpl(heap_object);
     341             :       }
     342             :     }
     343             :   }
     344             : 
     345             :   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
     346             :     VerifyPointersImpl(start, end);
     347             :   }
     348             :   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
     349             :     VerifyPointersImpl(start, end);
     350             :   }
     351             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
     352             :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     353             :     VerifyHeapObjectImpl(target);
     354             :   }
     355             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
     356             :     VerifyHeapObjectImpl(rinfo->target_object());
     357             :   }
     358             :   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
     359             :     VerifyPointersImpl(start, end);
     360             :   }
     361             : };
     362             : 
     363             : }  // namespace
     364             : #endif  // VERIFY_HEAP
     365             : 
     366             : // =============================================================================
     367             : // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
     368             : // =============================================================================
     369             : 
     370             : using MarkCompactMarkingVisitor =
     371             :     MarkingVisitor<FixedArrayVisitationMode::kRegular,
     372             :                    TraceRetainingPathMode::kEnabled,
     373             :                    MarkCompactCollector::MarkingState>;
     374             : 
     375             : namespace {
     376             : 
     377      282370 : int NumberOfAvailableCores() {
     378      282370 :   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
     379             :   // This number of cores should be greater than zero and never change.
     380             :   DCHECK_GE(num_cores, 1);
     381             :   DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
     382      282370 :   return num_cores;
     383             : }
     384             : 
     385             : }  // namespace
     386             : 
     387       61254 : int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
     388             :   DCHECK_GT(pages, 0);
     389             :   int tasks =
     390       61254 :       FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
     391       61254 :   if (!heap_->CanExpandOldGeneration(
     392       61254 :           static_cast<size_t>(tasks * Page::kPageSize))) {
     393             :     // Optimize for memory usage near the heap limit.
     394             :     tasks = 1;
     395             :   }
     396       61254 :   return tasks;
     397             : }
     398             : 
     399      147483 : int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
     400             :                                                                  int slots) {
     401             :   DCHECK_GT(pages, 0);
     402             :   // Limit the number of update tasks as task creation often dominates the
     403             :   // actual work that is being done.
     404             :   const int kMaxPointerUpdateTasks = 8;
     405             :   const int kSlotsPerTask = 600;
     406             :   const int wanted_tasks =
     407      147483 :       (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
     408             :   return FLAG_parallel_pointer_update
     409      147363 :              ? Min(kMaxPointerUpdateTasks,
     410             :                    Min(NumberOfAvailableCores(), wanted_tasks))
     411      294966 :              : 1;
     412             : }
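
To make the capping arithmetic above concrete: with pages = 10 and
slots = 3000, wanted_tasks = max(1, min(10, 3000 / 600)) = 5, which is then
clamped by both the available cores and kMaxPointerUpdateTasks. A standalone
sketch with hypothetical inputs (std::min/std::max stand in for V8's Min/Max
helpers):

    #include <algorithm>

    constexpr int PointerUpdateTasks(int pages, int slots, int cores) {
      const int kMaxPointerUpdateTasks = 8;
      const int kSlotsPerTask = 600;
      // A negative slot count means "unknown"; fall back to one task per page.
      const int wanted =
          (slots >= 0) ? std::max(1, std::min(pages, slots / kSlotsPerTask))
                       : pages;
      return std::min(kMaxPointerUpdateTasks, std::min(cores, wanted));
    }

    // 3000 slots over 10 pages want 5 tasks; 4 cores clamp that to 4.
    static_assert(PointerUpdateTasks(10, 3000, 4) == 4, "");
    // An unknown slot count on 10 pages with many cores hits the cap of 8.
    static_assert(PointerUpdateTasks(10, -1, 16) == 8, "");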
     413             : 
     414           0 : int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
     415             :     int pages) {
     416             :   DCHECK_GT(pages, 0);
     417             :   // No cap needed because all pages we need to process are fully filled with
     418             :   // interesting objects.
     419       73895 :   return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
     420       73955 :                                       : 1;
     421             : }
     422             : 
     423       61532 : MarkCompactCollector::MarkCompactCollector(Heap* heap)
     424             :     : MarkCompactCollectorBase(heap),
     425             :       page_parallel_job_semaphore_(0),
     426             : #ifdef DEBUG
     427             :       state_(IDLE),
     428             : #endif
     429             :       was_marked_incrementally_(false),
     430             :       evacuation_(false),
     431             :       compacting_(false),
     432             :       black_allocation_(false),
     433             :       have_code_to_deoptimize_(false),
     434             :       marking_worklist_(heap),
     435      123066 :       sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
     436       61534 :   old_to_new_slots_ = -1;
     437       61534 : }
     438             : 
     439      246074 : MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
     440             : 
     441       61534 : void MarkCompactCollector::SetUp() {
     442             :   DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
     443             :   DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
     444             :   DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
     445             :   DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
     446       61534 : }
     447             : 
     448       61518 : void MarkCompactCollector::TearDown() {
     449       61518 :   AbortCompaction();
     450       61518 :   AbortWeakObjects();
     451       61519 :   if (heap()->incremental_marking()->IsMarking()) {
     452        3799 :     marking_worklist()->Clear();
     453             :   }
     454       61519 : }
     455             : 
     456           0 : void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
     457             :   DCHECK(!p->NeverEvacuate());
     458        7072 :   p->MarkEvacuationCandidate();
     459        7072 :   evacuation_candidates_.push_back(p);
     460           0 : }
     461             : 
     462             : 
     463           0 : static void TraceFragmentation(PagedSpace* space) {
     464           0 :   int number_of_pages = space->CountTotalPages();
     465           0 :   intptr_t reserved = (number_of_pages * space->AreaSize());
     466           0 :   intptr_t free = reserved - space->SizeOfObjects();
     467           0 :   PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
     468           0 :          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
     469           0 : }
     470             : 
     471       77754 : bool MarkCompactCollector::StartCompaction() {
     472       77754 :   if (!compacting_) {
     473             :     DCHECK(evacuation_candidates_.empty());
     474             : 
     475       77754 :     CollectEvacuationCandidates(heap()->old_space());
     476             : 
     477       77754 :     if (FLAG_compact_code_space) {
     478       77754 :       CollectEvacuationCandidates(heap()->code_space());
     479           0 :     } else if (FLAG_trace_fragmentation) {
     480           0 :       TraceFragmentation(heap()->code_space());
     481             :     }
     482             : 
     483       77754 :     if (FLAG_trace_fragmentation) {
     484           0 :       TraceFragmentation(heap()->map_space());
     485             :     }
     486             : 
     487       77754 :     compacting_ = !evacuation_candidates_.empty();
     488             :   }
     489             : 
     490       77754 :   return compacting_;
     491             : }
     492             : 
     493       73955 : void MarkCompactCollector::CollectGarbage() {
     494             :   // Make sure that Prepare() has been called. The individual steps below will
     495             :   // update the state as they proceed.
     496             :   DCHECK(state_ == PREPARE_GC);
     497             : 
     498             : #ifdef ENABLE_MINOR_MC
     499       73955 :   heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
     500             : #endif  // ENABLE_MINOR_MC
     501             : 
     502       73955 :   MarkLiveObjects();
     503       73955 :   ClearNonLiveReferences();
     504       73955 :   VerifyMarking();
     505             : 
     506       73955 :   RecordObjectStats();
     507             : 
     508       73955 :   StartSweepSpaces();
     509             : 
     510       73955 :   Evacuate();
     511             : 
     512       73955 :   Finish();
     513       73955 : }
     514             : 
     515             : #ifdef VERIFY_HEAP
     516             : void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
     517             :   HeapObjectIterator iterator(space);
     518             :   for (HeapObject object = iterator.Next(); !object.is_null();
     519             :        object = iterator.Next()) {
     520             :     CHECK(non_atomic_marking_state()->IsBlack(object));
     521             :   }
     522             : }
     523             : 
     524             : void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
     525             :   for (Page* p : *space) {
     526             :     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
     527             :     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
     528             :   }
     529             : }
     530             : 
     531             : 
     532             : void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
     533             :   for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
     534             :     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
     535             :     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
     536             :   }
     537             : }
     538             : 
     539             : void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
     540             :   LargeObjectIterator it(space);
     541             :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
     542             :     CHECK(non_atomic_marking_state()->IsWhite(obj));
     543             :     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
     544             :                     MemoryChunk::FromHeapObject(obj)));
     545             :   }
     546             : }
     547             : 
     548             : void MarkCompactCollector::VerifyMarkbitsAreClean() {
     549             :   VerifyMarkbitsAreClean(heap_->old_space());
     550             :   VerifyMarkbitsAreClean(heap_->code_space());
     551             :   VerifyMarkbitsAreClean(heap_->map_space());
     552             :   VerifyMarkbitsAreClean(heap_->new_space());
     553             :   // Read-only space should always be black since we never collect any objects
     554             :   // in it or linked from it.
     555             :   VerifyMarkbitsAreDirty(heap_->read_only_space());
     556             :   VerifyMarkbitsAreClean(heap_->lo_space());
     557             :   VerifyMarkbitsAreClean(heap_->code_lo_space());
     558             :   VerifyMarkbitsAreClean(heap_->new_lo_space());
     559             : }
     560             : 
     561             : #endif  // VERIFY_HEAP
     562             : 
     563      172379 : void MarkCompactCollector::EnsureSweepingCompleted() {
     564      172379 :   if (!sweeper()->sweeping_in_progress()) return;
     565             : 
     566       73955 :   sweeper()->EnsureCompleted();
     567       73955 :   heap()->old_space()->RefillFreeList();
     568       73955 :   heap()->code_space()->RefillFreeList();
     569       73955 :   heap()->map_space()->RefillFreeList();
     570             : 
     571             : #ifdef VERIFY_HEAP
     572             :   if (FLAG_verify_heap && !evacuation()) {
     573             :     FullEvacuationVerifier verifier(heap());
     574             :     verifier.Run();
     575             :   }
     576             : #endif
     577             : }
     578             : 
     579      153476 : void MarkCompactCollector::ComputeEvacuationHeuristics(
     580             :     size_t area_size, int* target_fragmentation_percent,
     581             :     size_t* max_evacuated_bytes) {
      582             :   // For the memory-reducing and optimize-for-memory modes we define both
      583             :   // constants directly.
     584             :   const int kTargetFragmentationPercentForReduceMemory = 20;
     585             :   const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
     586             :   const int kTargetFragmentationPercentForOptimizeMemory = 20;
     587             :   const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
     588             : 
      589             :   // For the regular mode (which is latency-critical) we start with less
      590             :   // aggressive defaults and switch to a trace-based approach (using the
      591             :   // measured compaction speed) as soon as we have enough samples.
     592             :   const int kTargetFragmentationPercent = 70;
     593             :   const size_t kMaxEvacuatedBytes = 4 * MB;
      594             :   // Target time for processing a single area (= the payload of a page). Used
      595             :   // as soon as enough compaction speed samples exist.
     596             :   const float kTargetMsPerArea = .5;
     597             : 
     598      153476 :   if (heap()->ShouldReduceMemory()) {
     599       23604 :     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
     600       23604 :     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
     601      129872 :   } else if (heap()->ShouldOptimizeForMemoryUsage()) {
     602             :     *target_fragmentation_percent =
     603          20 :         kTargetFragmentationPercentForOptimizeMemory;
     604          20 :     *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
     605             :   } else {
     606             :     const double estimated_compaction_speed =
     607      129852 :         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
     608      129852 :     if (estimated_compaction_speed != 0) {
     609             :       // Estimate the target fragmentation based on traced compaction speed
     610             :       // and a goal for a single page.
     611             :       const double estimated_ms_per_area =
     612       98314 :           1 + area_size / estimated_compaction_speed;
     613             :       *target_fragmentation_percent = static_cast<int>(
     614       98314 :           100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
     615       98314 :       if (*target_fragmentation_percent <
     616             :           kTargetFragmentationPercentForReduceMemory) {
     617             :         *target_fragmentation_percent =
     618           0 :             kTargetFragmentationPercentForReduceMemory;
     619             :       }
     620             :     } else {
     621       31538 :       *target_fragmentation_percent = kTargetFragmentationPercent;
     622             :     }
     623      129852 :     *max_evacuated_bytes = kMaxEvacuatedBytes;
     624             :   }
     625      153476 : }
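
Plugging numbers into the trace-based branch above: with kTargetMsPerArea = 0.5
and a measured compaction speed of 1 MB/ms, a ~500 KB area gives
estimated_ms_per_area of about 1.5, so the target becomes
100 - 100 * 0.5 / 1.5, roughly 66%. A slow speed of 50 KB/ms gives about 11 ms
per area and a target near 95%, so only nearly empty pages qualify for
evacuation. A minimal sketch of just this computation (hypothetical numbers):

    #include <cstdio>

    // speed is in bytes per millisecond, matching the tracer's unit above.
    int TargetFragmentationPercent(double area_size, double speed) {
      const double kTargetMsPerArea = .5;
      const double estimated_ms_per_area = 1 + area_size / speed;
      return static_cast<int>(100 -
                              100 * kTargetMsPerArea / estimated_ms_per_area);
    }

    int main() {
      // Fast compaction: pages with roughly >= 66% free space qualify.
      std::printf("%d\n", TargetFragmentationPercent(500 * 1024.0, 1024 * 1024.0));
      // Slow compaction: the bar rises to ~95%.
      std::printf("%d\n", TargetFragmentationPercent(500 * 1024.0, 50 * 1024.0));
      return 0;
    }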
     626             : 
     627      155508 : void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     628             :   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
     629             : 
     630      155508 :   int number_of_pages = space->CountTotalPages();
     631      155508 :   size_t area_size = space->AreaSize();
     632             : 
     633             :   // Pairs of (live_bytes_in_page, page).
     634             :   typedef std::pair<size_t, Page*> LiveBytesPagePair;
     635             :   std::vector<LiveBytesPagePair> pages;
     636      155508 :   pages.reserve(number_of_pages);
     637             : 
     638             :   DCHECK(!sweeping_in_progress());
     639             :   Page* owner_of_linear_allocation_area =
     640             :       space->top() == space->limit()
     641             :           ? nullptr
     642      155508 :           : Page::FromAllocationAreaAddress(space->top());
     643      471739 :   for (Page* p : *space) {
     644      431129 :     if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
     645             :         !p->CanAllocate())
     646             :       continue;
      647             :     // Invariant: Evacuation candidates are only created when marking is
     648             :     // started. This means that sweeping has finished. Furthermore, at the end
     649             :     // of a GC all evacuation candidates are cleared and their slot buffers are
     650             :     // released.
     651      114898 :     CHECK(!p->IsEvacuationCandidate());
     652      114898 :     CHECK_NULL(p->slot_set<OLD_TO_OLD>());
     653      114898 :     CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
     654      114898 :     CHECK(p->SweepingDone());
     655             :     DCHECK(p->area_size() == area_size);
     656      229796 :     pages.push_back(std::make_pair(p->allocated_bytes(), p));
     657             :   }
     658             : 
     659             :   int candidate_count = 0;
     660             :   size_t total_live_bytes = 0;
     661             : 
     662             :   const bool reduce_memory = heap()->ShouldReduceMemory();
     663      155508 :   if (FLAG_manual_evacuation_candidates_selection) {
     664        1362 :     for (size_t i = 0; i < pages.size(); i++) {
     665         484 :       Page* p = pages[i].second;
     666         484 :       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
     667         194 :         candidate_count++;
     668         194 :         total_live_bytes += pages[i].first;
     669             :         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
     670             :         AddEvacuationCandidate(p);
     671             :       }
     672             :     }
     673      155114 :   } else if (FLAG_stress_compaction_random) {
     674           0 :     double fraction = isolate()->fuzzer_rng()->NextDouble();
     675             :     size_t pages_to_mark_count =
     676           0 :         static_cast<size_t>(fraction * (pages.size() + 1));
     677           0 :     for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
     678           0 :              pages.size(), pages_to_mark_count)) {
     679           0 :       candidate_count++;
     680           0 :       total_live_bytes += pages[i].first;
     681           0 :       AddEvacuationCandidate(pages[i].second);
     682             :     }
     683      155114 :   } else if (FLAG_stress_compaction) {
     684        9392 :     for (size_t i = 0; i < pages.size(); i++) {
     685        3877 :       Page* p = pages[i].second;
     686        3877 :       if (i % 2 == 0) {
     687        2328 :         candidate_count++;
     688        2328 :         total_live_bytes += pages[i].first;
     689             :         AddEvacuationCandidate(p);
     690             :       }
     691             :     }
     692             :   } else {
     693             :     // The following approach determines the pages that should be evacuated.
     694             :     //
     695             :     // We use two conditions to decide whether a page qualifies as an evacuation
     696             :     // candidate, or not:
      697             :     // * Target fragmentation: how fragmented a page is, i.e., the ratio
      698             :     //   between live bytes and the capacity of the page (= area).
      699             :     // * Evacuation quota: a global quota determining how many bytes should be
      700             :     //   compacted.
     701             :     //
     702             :     // The algorithm sorts all pages by live bytes and then iterates through
     703             :     // them starting with the page with the most free memory, adding them to the
     704             :     // set of evacuation candidates as long as both conditions (fragmentation
     705             :     // and quota) hold.
     706             :     size_t max_evacuated_bytes;
     707             :     int target_fragmentation_percent;
     708             :     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
     709      153476 :                                 &max_evacuated_bytes);
     710             : 
     711             :     const size_t free_bytes_threshold =
     712      153476 :         target_fragmentation_percent * (area_size / 100);
     713             : 
     714             :     // Sort pages from the most free to the least free, then select
     715             :     // the first n pages for evacuation such that:
     716             :     // - the total size of evacuated objects does not exceed the specified
     717             :     // limit.
     718             :     // - fragmentation of (n+1)-th page does not exceed the specified limit.
     719             :     std::sort(pages.begin(), pages.end(),
     720             :               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
     721             :                 return a.first < b.first;
     722             :               });
     723      374550 :     for (size_t i = 0; i < pages.size(); i++) {
     724      110537 :       size_t live_bytes = pages[i].first;
     725             :       DCHECK_GE(area_size, live_bytes);
     726      110537 :       size_t free_bytes = area_size - live_bytes;
     727      110537 :       if (FLAG_always_compact ||
     728       36603 :           ((free_bytes >= free_bytes_threshold) &&
     729       36603 :            ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
     730       36658 :         candidate_count++;
     731       36658 :         total_live_bytes += live_bytes;
     732             :       }
     733      110537 :       if (FLAG_trace_fragmentation_verbose) {
     734           0 :         PrintIsolate(isolate(),
     735             :                      "compaction-selection-page: space=%s free_bytes_page=%zu "
     736             :                      "fragmentation_limit_kb=%" PRIuS
     737             :                      " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
     738             :                      "compaction_limit_kb=%zu\n",
     739             :                      space->name(), free_bytes / KB, free_bytes_threshold / KB,
     740             :                      target_fragmentation_percent, total_live_bytes / KB,
     741           0 :                      max_evacuated_bytes / KB);
     742             :       }
     743             :     }
      744             :     // How many pages we will allocate for the evacuated objects in the worst
      745             :     // case: ceil(total_live_bytes / area_size).
     746             :     int estimated_new_pages =
     747      153476 :         static_cast<int>((total_live_bytes + area_size - 1) / area_size);
     748             :     DCHECK_LE(estimated_new_pages, candidate_count);
     749             :     int estimated_released_pages = candidate_count - estimated_new_pages;
     750             :     // Avoid (compact -> expand) cycles.
     751      153476 :     if ((estimated_released_pages == 0) && !FLAG_always_compact) {
     752             :       candidate_count = 0;
     753             :     }
     754      162576 :     for (int i = 0; i < candidate_count; i++) {
     755        9100 :       AddEvacuationCandidate(pages[i].second);
     756             :     }
     757             :   }
     758             : 
     759      155508 :   if (FLAG_trace_fragmentation) {
     760           0 :     PrintIsolate(isolate(),
     761             :                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
     762             :                  "total_live_bytes=%zu\n",
     763             :                  space->name(), reduce_memory, candidate_count,
     764           0 :                  total_live_bytes / KB);
     765             :   }
     766      155508 : }
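
The selection loop above reduces to a small greedy algorithm: sort pages
most-free first, take a page while both the per-page fragmentation test and
the global byte quota hold, then drop the whole plan if compaction would not
actually release a page. A minimal standalone sketch (hypothetical types; a
page is reduced to its live-byte count):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    size_t SelectEvacuationCandidates(std::vector<size_t> live_bytes,
                                      size_t area_size,
                                      size_t free_bytes_threshold,
                                      size_t max_evacuated_bytes) {
      // Ascending live bytes is the same as descending free bytes.
      std::sort(live_bytes.begin(), live_bytes.end());
      size_t candidate_count = 0, total_live_bytes = 0;
      for (size_t live : live_bytes) {
        const size_t free_bytes = area_size - live;
        if (free_bytes >= free_bytes_threshold &&
            total_live_bytes + live <= max_evacuated_bytes) {
          ++candidate_count;
          total_live_bytes += live;
        }
      }
      // Worst-case page demand: ceil(total_live_bytes / area_size), written
      // in integer arithmetic exactly as in the function above.
      const size_t estimated_new_pages =
          (total_live_bytes + area_size - 1) / area_size;
      // Avoid (compact -> expand) cycles: bail out if no page is released.
      return candidate_count > estimated_new_pages ? candidate_count : 0;
    }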
     767             : 
     768             : 
     769       61517 : void MarkCompactCollector::AbortCompaction() {
     770       61517 :   if (compacting_) {
     771          37 :     RememberedSet<OLD_TO_OLD>::ClearAll(heap());
     772         103 :     for (Page* p : evacuation_candidates_) {
     773             :       p->ClearEvacuationCandidate();
     774             :     }
     775          37 :     compacting_ = false;
     776             :     evacuation_candidates_.clear();
     777             :   }
     778             :   DCHECK(evacuation_candidates_.empty());
     779       61517 : }
     780             : 
     781             : 
     782       73955 : void MarkCompactCollector::Prepare() {
     783       73955 :   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
     784             : 
     785             : #ifdef DEBUG
     786             :   DCHECK(state_ == IDLE);
     787             :   state_ = PREPARE_GC;
     788             : #endif
     789             : 
     790             :   DCHECK(!FLAG_never_compact || !FLAG_always_compact);
     791             : 
     792             :   // Instead of waiting we could also abort the sweeper threads here.
     793       73955 :   EnsureSweepingCompleted();
     794             : 
     795       73955 :   if (heap()->incremental_marking()->IsSweeping()) {
     796        3421 :     heap()->incremental_marking()->Stop();
     797             :   }
     798             : 
     799       73955 :   if (!was_marked_incrementally_) {
     800      210268 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
     801      105134 :     heap_->local_embedder_heap_tracer()->TracePrologue();
     802             :   }
     803             : 
     804             :   // Don't start compaction if we are in the middle of incremental
     805             :   // marking cycle. We did not collect any slots.
     806       73955 :   if (!FLAG_never_compact && !was_marked_incrementally_) {
     807       52567 :     StartCompaction();
     808             :   }
     809             : 
     810             :   PagedSpaces spaces(heap());
     811      295820 :   for (PagedSpace* space = spaces.next(); space != nullptr;
     812             :        space = spaces.next()) {
     813      221865 :     space->PrepareForMarkCompact();
     814             :   }
     815             :   heap()->account_external_memory_concurrently_freed();
     816             : 
     817             : #ifdef VERIFY_HEAP
     818             :   if (!was_marked_incrementally_ && FLAG_verify_heap) {
     819             :     VerifyMarkbitsAreClean();
     820             :   }
     821             : #endif
     822       73955 : }
     823             : 
     824      221904 : void MarkCompactCollector::FinishConcurrentMarking(
     825             :     ConcurrentMarking::StopRequest stop_request) {
      826             :   // FinishConcurrentMarking is called for both concurrent and parallel
     827             :   // marking. It is safe to call this function when tasks are already finished.
     828      221904 :   if (FLAG_parallel_marking || FLAG_concurrent_marking) {
     829      219621 :     heap()->concurrent_marking()->Stop(stop_request);
     830             :     heap()->concurrent_marking()->FlushMemoryChunkData(
     831      219621 :         non_atomic_marking_state());
     832             :   }
     833      221904 : }
     834             : 
     835       73955 : void MarkCompactCollector::VerifyMarking() {
     836       73955 :   CHECK(marking_worklist()->IsEmpty());
     837             :   DCHECK(heap_->incremental_marking()->IsStopped());
     838             : #ifdef VERIFY_HEAP
     839             :   if (FLAG_verify_heap) {
     840             :     FullMarkingVerifier verifier(heap());
     841             :     verifier.Run();
     842             :   }
     843             : #endif
     844             : #ifdef VERIFY_HEAP
     845             :   if (FLAG_verify_heap) {
     846             :     heap()->old_space()->VerifyLiveBytes();
     847             :     heap()->map_space()->VerifyLiveBytes();
     848             :     heap()->code_space()->VerifyLiveBytes();
     849             :   }
     850             : #endif
     851       73955 : }
     852             : 
     853       73955 : void MarkCompactCollector::Finish() {
     854      295820 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
     855             : 
     856       73955 :   epoch_++;
     857             : 
     858             : #ifdef DEBUG
     859             :   heap()->VerifyCountersBeforeConcurrentSweeping();
     860             : #endif
     861             : 
     862       73955 :   CHECK(weak_objects_.current_ephemerons.IsEmpty());
     863       73955 :   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
     864             :   weak_objects_.next_ephemerons.Clear();
     865             : 
     866       73955 :   sweeper()->StartSweeperTasks();
     867       73955 :   sweeper()->StartIterabilityTasks();
     868             : 
     869             :   // Clear the marking state of live large objects.
     870       73955 :   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
     871       73955 :   heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
     872             : 
     873             : #ifdef DEBUG
     874             :   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
     875             :   state_ = IDLE;
     876             : #endif
     877       73955 :   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
     878             : 
     879             :   // The stub caches are not traversed during GC; clear them to force
     880             :   // their lazy re-initialization. This must be done after the
      881             :   // GC, because it relies on the new addresses of certain old space
     882             :   // objects (empty string, illegal builtin).
     883       73955 :   isolate()->load_stub_cache()->Clear();
     884       73955 :   isolate()->store_stub_cache()->Clear();
     885             : 
     886       73955 :   if (have_code_to_deoptimize_) {
     887             :     // Some code objects were marked for deoptimization during the GC.
     888          75 :     Deoptimizer::DeoptimizeMarkedCode(isolate());
     889          75 :     have_code_to_deoptimize_ = false;
     890             :   }
     891       73955 : }
     892             : 
     893      147910 : class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
     894             :  public:
     895             :   explicit RootMarkingVisitor(MarkCompactCollector* collector)
     896       73955 :       : collector_(collector) {}
     897             : 
     898   226380647 :   void VisitRootPointer(Root root, const char* description,
     899             :                         FullObjectSlot p) final {
     900             :     MarkObjectByPointer(root, p);
     901   226380571 :   }
     902             : 
     903     1565092 :   void VisitRootPointers(Root root, const char* description,
     904             :                          FullObjectSlot start, FullObjectSlot end) final {
     905    38982797 :     for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
     906     1565092 :   }
     907             : 
     908             :  private:
     909             :   V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
     910   262233260 :     if (!(*p)->IsHeapObject()) return;
     911             : 
     912   257605973 :     collector_->MarkRootObject(root, HeapObject::cast(*p));
     913             :   }
     914             : 
     915             :   MarkCompactCollector* const collector_;
     916             : };
     917             : 
     918             : // This visitor is used to visit the body of special objects held alive by
     919             : // other roots.
     920             : //
      921             : // It is currently used for:
      922             : // - Code held alive by the top optimized frame. This code cannot be
      923             : // deoptimized and thus has to be kept alive in an isolated way, i.e., it must
      924             : // not keep alive other code objects reachable through the weak list, but its
      925             : // embedded pointers have to be kept alive (they would otherwise be dropped).
     926             : // - Prefix of the string table.
     927      147910 : class MarkCompactCollector::CustomRootBodyMarkingVisitor final
     928             :     : public ObjectVisitor {
     929             :  public:
     930             :   explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
     931       73955 :       : collector_(collector) {}
     932             : 
     933           0 :   void VisitPointer(HeapObject host, ObjectSlot p) final {
     934             :     MarkObject(host, *p);
     935           0 :   }
     936             : 
     937       74428 :   void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
     938      519947 :     for (ObjectSlot p = start; p < end; ++p) {
     939             :       DCHECK(!HasWeakHeapObjectTag(*p));
     940             :       MarkObject(host, *p);
     941             :     }
     942       74428 :   }
     943             : 
     944           0 :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
     945             :                      MaybeObjectSlot end) final {
     946             :     // At the moment, custom roots cannot contain weak pointers.
     947           0 :     UNREACHABLE();
     948             :   }
     949             : 
     950             :   // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
     951           0 :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
     952           0 :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     953             :     MarkObject(host, target);
     954           0 :   }
     955        4870 :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
     956             :     MarkObject(host, rinfo->target_object());
     957        4870 :   }
     958             : 
     959             :  private:
     960             :   V8_INLINE void MarkObject(HeapObject host, Object object) {
     961      375961 :     if (!object->IsHeapObject()) return;
     962       82445 :     collector_->MarkObject(host, HeapObject::cast(object));
     963             :   }
     964             : 
     965             :   MarkCompactCollector* const collector_;
     966             : };
     967             : 
     968      147910 : class InternalizedStringTableCleaner : public ObjectVisitor {
     969             :  public:
     970             :   InternalizedStringTableCleaner(Heap* heap, HeapObject table)
     971       73955 :       : heap_(heap), pointers_removed_(0), table_(table) {}
     972             : 
     973       73955 :   void VisitPointers(HeapObject host, ObjectSlot start,
     974             :                      ObjectSlot end) override {
     975             :     // Visit all HeapObject pointers in [start, end).
     976       73955 :     Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
     977             :     MarkCompactCollector::NonAtomicMarkingState* marking_state =
     978             :         heap_->mark_compact_collector()->non_atomic_marking_state();
     979   189345988 :     for (ObjectSlot p = start; p < end; ++p) {
     980             :       Object o = *p;
     981   189198078 :       if (o->IsHeapObject()) {
     982             :         HeapObject heap_object = HeapObject::cast(o);
     983   189198079 :         if (marking_state->IsWhite(heap_object)) {
     984     4392690 :           pointers_removed_++;
     985             :           // Set the entry to the_hole_value (as deleted).
     986             :           p.store(the_hole);
     987             :         } else {
     988             :           // StringTable contains only old space strings.
     989             :           DCHECK(!Heap::InYoungGeneration(o));
     990             :           MarkCompactCollector::RecordSlot(table_, p, heap_object);
     991             :         }
     992             :       }
     993             :     }
     994       73955 :   }
     995             : 
     996           0 :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
     997             :                      MaybeObjectSlot end) final {
     998           0 :     UNREACHABLE();
     999             :   }
    1000             : 
    1001           0 :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
    1002             : 
    1003           0 :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    1004           0 :     UNREACHABLE();
    1005             :   }
    1006             : 
    1007             :   int PointersRemoved() {
    1008             :     return pointers_removed_;
    1009             :   }
    1010             : 
    1011             :  private:
    1012             :   Heap* heap_;
    1013             :   int pointers_removed_;
    1014             :   HeapObject table_;
    1015             : };
    1016             : 
    1017      147910 : class ExternalStringTableCleaner : public RootVisitor {
    1018             :  public:
    1019       73955 :   explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
    1020             : 
    1021       73932 :   void VisitRootPointers(Root root, const char* description,
    1022             :                          FullObjectSlot start, FullObjectSlot end) override {
    1023             :     // Visit all HeapObject pointers in [start, end).
    1024             :     MarkCompactCollector::NonAtomicMarkingState* marking_state =
    1025       73932 :         heap_->mark_compact_collector()->non_atomic_marking_state();
    1026             :     Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
    1027      250849 :     for (FullObjectSlot p = start; p < end; ++p) {
    1028             :       Object o = *p;
    1029      102985 :       if (o->IsHeapObject()) {
    1030             :         HeapObject heap_object = HeapObject::cast(o);
    1031      102985 :         if (marking_state->IsWhite(heap_object)) {
    1032        1606 :           if (o->IsExternalString()) {
    1033        1606 :             heap_->FinalizeExternalString(String::cast(o));
    1034             :           } else {
    1035             :             // The original external string may have been internalized.
    1036             :             DCHECK(o->IsThinString());
    1037             :           }
    1038             :           // Set the entry to the_hole_value (as deleted).
    1039             :           p.store(the_hole);
    1040             :         }
    1041             :       }
    1042             :     }
    1043       73932 :   }
    1044             : 
    1045             :  private:
    1046             :   Heap* heap_;
    1047             : };
    1048             : 
    1049             : // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
    1050             : // are retained.
    1051      147910 : class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
    1052             :  public:
    1053             :   explicit MarkCompactWeakObjectRetainer(
    1054             :       MarkCompactCollector::NonAtomicMarkingState* marking_state)
    1055       73955 :       : marking_state_(marking_state) {}
    1056             : 
    1057     3964243 :   Object RetainAs(Object object) override {
    1058             :     HeapObject heap_object = HeapObject::cast(object);
    1059             :     DCHECK(!marking_state_->IsGrey(heap_object));
    1060     3964243 :     if (marking_state_->IsBlack(heap_object)) {
    1061     3686247 :       return object;
    1062      425444 :     } else if (object->IsAllocationSite() &&
    1063             :                !(AllocationSite::cast(object)->IsZombie())) {
    1064             :       // "dead" AllocationSites need to live long enough for a traversal of new
    1065             :       // space. These sites get a one-time reprieve.
    1066             : 
    1067             :       Object nested = object;
    1068      235549 :       while (nested->IsAllocationSite()) {
    1069       79613 :         AllocationSite current_site = AllocationSite::cast(nested);
    1070             :         // MarkZombie will override the nested_site; read it first before
    1071             :         // marking.
    1072             :         nested = current_site->nested_site();
    1073             :         current_site->MarkZombie();
    1074             :         marking_state_->WhiteToBlack(current_site);
    1075             :       }
    1076             : 
    1077       76323 :       return object;
    1078             :     } else {
    1079      201673 :       return Object();
    1080             :     }
    1081             :   }
    1082             : 
    1083             :  private:
    1084             :   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
    1085             : };
    1086             : 
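                     : // RetainAs() implements the contract used when pruning the heap's weak
                     : // lists: a non-null result stays in the list, the null Object() unlinks
                     : // the entry. A minimal sketch of a consumer, with hypothetical
                     : // Next()/SetNext() accessors (illustrative only, not the routine that
                     : // heap()->ProcessAllWeakReferences() actually uses):
                     : //
                     : //   Object PruneWeakList(Object head, WeakObjectRetainer* retainer) {
                     : //     Object new_head, tail;
                     : //     for (Object cur = head; cur != Object(); cur = Next(cur)) {
                     : //       Object kept = retainer->RetainAs(cur);
                     : //       if (kept == Object()) continue;  // Dead entry: unlink it.
                     : //       if (new_head == Object()) new_head = kept;
                     : //       else SetNext(tail, kept);
                     : //       tail = kept;
                     : //     }
                     : //     return new_head;
                     : //   }
                     :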
    1087      122573 : class RecordMigratedSlotVisitor : public ObjectVisitor {
    1088             :  public:
    1089             :   explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
    1090       61319 :       : collector_(collector) {}
    1091             : 
    1092        6672 :   inline void VisitPointer(HeapObject host, ObjectSlot p) final {
    1093             :     DCHECK(!HasWeakHeapObjectTag(*p));
    1094  1013153826 :     RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
    1095        6672 :   }
    1096             : 
    1097           0 :   inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
    1098    58009914 :     RecordMigratedSlot(host, *p, p.address());
    1099           0 :   }
    1100             : 
    1101     9183828 :   inline void VisitPointers(HeapObject host, ObjectSlot start,
    1102             :                             ObjectSlot end) final {
    1103   514975792 :     while (start < end) {
    1104             :       VisitPointer(host, start);
    1105             :       ++start;
    1106             :     }
    1107     9013025 :   }
    1108             : 
    1109     1439306 :   inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
    1110             :                             MaybeObjectSlot end) final {
    1111    30142838 :     while (start < end) {
    1112             :       VisitPointer(host, start);
    1113             :       ++start;
    1114             :     }
    1115     1434938 :   }
    1116             : 
    1117         357 :   inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    1118             :     DCHECK_EQ(host, rinfo->host());
    1119             :     DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
    1120         357 :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    1121             :     // The target is always in old space, so we don't have to record the slot
    1122             :     // in the old-to-new remembered set.
    1123             :     DCHECK(!Heap::InYoungGeneration(target));
    1124         357 :     collector_->RecordRelocSlot(host, rinfo, target);
    1125         357 :   }
    1126             : 
    1127       51596 :   inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    1128             :     DCHECK_EQ(host, rinfo->host());
    1129             :     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    1130             :     HeapObject object = HeapObject::cast(rinfo->target_object());
    1131             :     GenerationalBarrierForCode(host, rinfo, object);
    1132       51596 :     collector_->RecordRelocSlot(host, rinfo, object);
    1133       51591 :   }
    1134             : 
    1135             :   // Entries that are skipped for recording.
    1136           0 :   inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
    1137           0 :   inline void VisitExternalReference(Foreign host, Address* p) final {}
    1138       35903 :   inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
    1139           0 :   inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
    1140             : 
    1141             :  protected:
    1142   534134997 :   inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
    1143             :                                          Address slot) {
    1144   534134997 :     if (value->IsStrongOrWeak()) {
    1145             :       MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
    1146   360699381 :       if (p->InYoungGeneration()) {
    1147             :         DCHECK_IMPLIES(
    1148             :             p->IsToPage(),
    1149             :             p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
    1150             :         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
    1151     7301238 :             MemoryChunk::FromHeapObject(host), slot);
    1152   353398143 :       } else if (p->IsEvacuationCandidate()) {
    1153             :         RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
    1154    12466385 :             MemoryChunk::FromHeapObject(host), slot);
    1155             :       }
    1156             :     }
    1157   534125936 :   }
    1158             : 
    1159             :   MarkCompactCollector* collector_;
    1160             : };
    1161             : 
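                     : // After an object migrates, its outgoing pointers must be re-registered in
                     : // the remembered sets, because the interesting slots now live at addresses
                     : // inside the new copy. RecordMigratedSlot() above applies this decision:
                     : //
                     : //   value is a Smi or a cleared weak ref -> nothing to record
                     : //   value's chunk is in the young gen    -> OLD_TO_NEW set of the host chunk
                     : //   value's chunk is an evac. candidate  -> OLD_TO_OLD set of the host chunk
                     : //   otherwise                            -> nothing to record
                     :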
    1162             : class MigrationObserver {
    1163             :  public:
    1164       61254 :   explicit MigrationObserver(Heap* heap) : heap_(heap) {}
    1165             : 
    1166       61254 :   virtual ~MigrationObserver() = default;
    1167             :   virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
    1168             :                     int size) = 0;
    1169             : 
    1170             :  protected:
    1171             :   Heap* heap_;
    1172             : };
    1173             : 
    1174       61254 : class ProfilingMigrationObserver final : public MigrationObserver {
    1175             :  public:
    1176       61254 :   explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
    1177             : 
    1178      953099 :   inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
    1179             :                    int size) final {
    1180     1119718 :     if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
    1181         424 :       PROFILE(heap_->isolate(),
    1182             :               CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
    1183             :     }
    1184      953099 :     heap_->OnMoveEvent(dst, src, size);
    1185      957169 :   }
    1186             : };
    1187             : 
    1188      308481 : class HeapObjectVisitor {
    1189             :  public:
    1190      308481 :   virtual ~HeapObjectVisitor() = default;
    1191             :   virtual bool Visit(HeapObject object, int size) = 0;
    1192             : };
    1193             : 
    1194      308456 : class EvacuateVisitorBase : public HeapObjectVisitor {
    1195             :  public:
    1196             :   void AddObserver(MigrationObserver* observer) {
    1197        1604 :     migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
    1198        1604 :     observers_.push_back(observer);
    1199             :   }
    1200             : 
    1201             :  protected:
    1202             :   enum MigrationMode { kFast, kObserved };
    1203             : 
    1204             :   typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject dst,
    1205             :                                   HeapObject src, int size,
    1206             :                                   AllocationSpace dest);
    1207             : 
    1208             :   template <MigrationMode mode>
    1209    52037750 :   static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
    1210             :                                HeapObject src, int size, AllocationSpace dest) {
    1211             :     Address dst_addr = dst->address();
    1212             :     Address src_addr = src->address();
    1213             :     DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
    1214             :     DCHECK_NE(dest, LO_SPACE);
    1215             :     DCHECK_NE(dest, CODE_LO_SPACE);
    1216    52037750 :     if (dest == OLD_SPACE) {
    1217             :       DCHECK_OBJECT_SIZE(size);
    1218             :       DCHECK(IsAligned(size, kTaggedSize));
    1219             :       base->heap_->CopyBlock(dst_addr, src_addr, size);
    1220             :       if (mode != MigrationMode::kFast)
    1221             :         base->ExecuteMigrationObservers(dest, src, dst, size);
    1222    27395214 :       dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
    1223    24950518 :     } else if (dest == CODE_SPACE) {
    1224             :       DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
    1225             :       base->heap_->CopyBlock(dst_addr, src_addr, size);
    1226        1585 :       Code::cast(dst)->Relocate(dst_addr - src_addr);
    1227             :       if (mode != MigrationMode::kFast)
    1228             :         base->ExecuteMigrationObservers(dest, src, dst, size);
    1229        1586 :       dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
    1230             :     } else {
    1231             :       DCHECK_OBJECT_SIZE(size);
    1232             :       DCHECK(dest == NEW_SPACE);
    1233             :       base->heap_->CopyBlock(dst_addr, src_addr, size);
    1234             :       if (mode != MigrationMode::kFast)
    1235             :         base->ExecuteMigrationObservers(dest, src, dst, size);
    1236             :     }
    1237             :     src->set_map_word(MapWord::FromForwardingAddress(dst));
    1238    52733431 :   }
    1239             : 
    1240             :   EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
    1241             :                       RecordMigratedSlotVisitor* record_visitor)
    1242             :       : heap_(heap),
    1243             :         local_allocator_(local_allocator),
    1244      154228 :         record_visitor_(record_visitor) {
    1245      154228 :     migration_function_ = RawMigrateObject<MigrationMode::kFast>;
    1246             :   }
    1247             : 
    1248    27059546 :   inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
    1249             :                                 int size, HeapObject* target_object) {
    1250             : #ifdef VERIFY_HEAP
    1251             :     if (AbortCompactionForTesting(object)) return false;
    1252             : #endif  // VERIFY_HEAP
    1253             :     AllocationAlignment alignment =
    1254             :         HeapObject::RequiredAlignment(object->map());
    1255             :     AllocationResult allocation =
    1256    27059546 :         local_allocator_->Allocate(target_space, size, alignment);
    1257    27158057 :     if (allocation.To(target_object)) {
    1258             :       MigrateObject(*target_object, object, size, target_space);
    1259    27567697 :       return true;
    1260             :     }
    1261             :     return false;
    1262             :   }
    1263             : 
    1264             :   inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
    1265             :                                         HeapObject dst, int size) {
    1266     1911159 :     for (MigrationObserver* obs : observers_) {
    1267      953995 :       obs->Move(dest, src, dst, size);
    1268             :     }
    1269             :   }
    1270             : 
    1271             :   inline void MigrateObject(HeapObject dst, HeapObject src, int size,
    1272             :                             AllocationSpace dest) {
    1273    52077646 :     migration_function_(this, dst, src, size, dest);
    1274             :   }
    1275             : 
    1276             : #ifdef VERIFY_HEAP
    1277             :   bool AbortCompactionForTesting(HeapObject object) {
    1278             :     if (FLAG_stress_compaction) {
    1279             :       const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
    1280             :                              kPageAlignmentMask & ~kObjectAlignmentMask;
    1281             :       if ((object->ptr() & kPageAlignmentMask) == mask) {
    1282             :         Page* page = Page::FromHeapObject(object);
    1283             :         if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
    1284             :           page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
    1285             :         } else {
    1286             :           page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
    1287             :           return true;
    1288             :         }
    1289             :       }
    1290             :     }
    1291             :     return false;
    1292             :   }
    1293             : #endif  // VERIFY_HEAP
    1294             : 
    1295             :   Heap* heap_;
    1296             :   LocalAllocator* local_allocator_;
    1297             :   RecordMigratedSlotVisitor* record_visitor_;
    1298             :   std::vector<MigrationObserver*> observers_;
    1299             :   MigrateFunction migration_function_;
    1300             : };
    1301             : 
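                     : // EvacuateVisitorBase compiles RawMigrateObject twice and dispatches
                     : // through migration_function_: the kFast instantiation folds the observer
                     : // notification away at compile time, and AddObserver() swaps in the
                     : // kObserved instantiation. A minimal standalone sketch of the pattern,
                     : // with hypothetical names rather than the V8 types:
                     : //
                     : //   enum class Mode { kFast, kObserved };
                     : //   struct Mover {
                     : //     template <Mode mode>
                     : //     static void Migrate(Mover* self) {
                     : //       // ... copy the object's bytes ...
                     : //       if (mode != Mode::kFast) self->NotifyObservers();
                     : //     }
                     : //     void AddObserver(Observer* o) {
                     : //       fn_ = Migrate<Mode::kObserved>;  // Pay for observers only if used.
                     : //       observers_.push_back(o);
                     : //     }
                     : //     void (*fn_)(Mover*) = Migrate<Mode::kFast>;
                     : //     std::vector<Observer*> observers_;
                     : //   };
                     :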
    1302      154228 : class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
    1303             :  public:
    1304       77114 :   explicit EvacuateNewSpaceVisitor(
    1305             :       Heap* heap, LocalAllocator* local_allocator,
    1306             :       RecordMigratedSlotVisitor* record_visitor,
    1307             :       Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
    1308             :       : EvacuateVisitorBase(heap, local_allocator, record_visitor),
    1309             :         buffer_(LocalAllocationBuffer::InvalidBuffer()),
    1310             :         promoted_size_(0),
    1311             :         semispace_copied_size_(0),
    1312             :         local_pretenuring_feedback_(local_pretenuring_feedback),
    1313      231342 :         is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
    1314             : 
    1315    33065825 :   inline bool Visit(HeapObject object, int size) override {
    1316    33065825 :     if (TryEvacuateWithoutCopy(object)) return true;
    1317    32071543 :     HeapObject target_object;
    1318    71184621 :     if (heap_->ShouldBePromoted(object->address()) &&
    1319     7039917 :         TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
    1320     7030500 :       promoted_size_ += size;
    1321     7030500 :       return true;
    1322             :     }
    1323    50085322 :     heap_->UpdateAllocationSite(object->map(), object,
    1324    25042661 :                                 local_pretenuring_feedback_);
    1325    25025463 :     HeapObject target;
    1326    25025463 :     AllocationSpace space = AllocateTargetObject(object, size, &target);
    1327    24960572 :     MigrateObject(HeapObject::cast(target), object, size, space);
    1328    25036256 :     semispace_copied_size_ += size;
    1329    25036256 :     return true;
    1330             :   }
    1331             : 
    1332             :   intptr_t promoted_size() { return promoted_size_; }
    1333             :   intptr_t semispace_copied_size() { return semispace_copied_size_; }
    1334             : 
    1335             :  private:
    1336    33051696 :   inline bool TryEvacuateWithoutCopy(HeapObject object) {
    1337    33051696 :     if (is_incremental_marking_) return false;
    1338             : 
    1339             :     Map map = object->map();
    1340             : 
    1341             :     // Some objects can be evacuated without creating a copy.
    1342    33064462 :     if (map->visitor_id() == kVisitThinString) {
    1343             :       HeapObject actual = ThinString::cast(object)->unchecked_actual();
    1344     1377065 :       if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
    1345             :       object->map_slot().Relaxed_Store(
    1346             :           MapWord::FromForwardingAddress(actual).ToMap());
    1347     1373968 :       return true;
    1348             :     }
    1349             :     // TODO(mlippautz): Handle ConsString.
    1350             : 
    1351             :     return false;
    1352             :   }
    1353             : 
    1354    25029480 :   inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
    1355             :                                               HeapObject* target_object) {
    1356             :     AllocationAlignment alignment =
    1357             :         HeapObject::RequiredAlignment(old_object->map());
    1358             :     AllocationSpace space_allocated_in = NEW_SPACE;
    1359             :     AllocationResult allocation =
    1360    25029480 :         local_allocator_->Allocate(NEW_SPACE, size, alignment);
    1361    24950699 :     if (allocation.IsRetry()) {
    1362          29 :       allocation = AllocateInOldSpace(size, alignment);
    1363             :       space_allocated_in = OLD_SPACE;
    1364             :     }
    1365             :     bool ok = allocation.To(target_object);
    1366             :     DCHECK(ok);
    1367             :     USE(ok);
    1368    24950699 :     return space_allocated_in;
    1369             :   }
    1370             : 
    1371          29 :   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
    1372             :                                              AllocationAlignment alignment) {
    1373             :     AllocationResult allocation =
    1374          29 :         local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
    1375          29 :     if (allocation.IsRetry()) {
    1376           0 :       heap_->FatalProcessOutOfMemory(
    1377           0 :           "MarkCompactCollector: semi-space copy, fallback in old gen");
    1378             :     }
    1379          29 :     return allocation;
    1380             :   }
    1381             : 
    1382             :   LocalAllocationBuffer buffer_;
    1383             :   intptr_t promoted_size_;
    1384             :   intptr_t semispace_copied_size_;
    1385             :   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
    1386             :   bool is_incremental_marking_;
    1387             : };
    1388             : 
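                     : // Visit() above tries three strategies for a live new-space object, in
                     : // order: forward in place without copying (currently only ThinString,
                     : // whose map slot can point straight at the actual string), promote into
                     : // OLD_SPACE when heap_->ShouldBePromoted() says the object has survived
                     : // long enough, and otherwise make a semispace copy, falling back to old
                     : // space only when new-space allocation fails.
                     :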
    1389             : template <PageEvacuationMode mode>
    1390      231342 : class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
    1391             :  public:
    1392             :   explicit EvacuateNewSpacePageVisitor(
    1393             :       Heap* heap, RecordMigratedSlotVisitor* record_visitor,
    1394             :       Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
    1395             :       : heap_(heap),
    1396             :         record_visitor_(record_visitor),
    1397             :         moved_bytes_(0),
    1398      154228 :         local_pretenuring_feedback_(local_pretenuring_feedback) {}
    1399             : 
    1400        2481 :   static void Move(Page* page) {
    1401             :     switch (mode) {
    1402             :       case NEW_TO_NEW:
    1403             :         page->heap()->new_space()->MovePageFromSpaceToSpace(page);
    1404             :         page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
    1405             :         break;
    1406             :       case NEW_TO_OLD: {
    1407         937 :         page->heap()->new_space()->from_space().RemovePage(page);
    1408         937 :         Page* new_page = Page::ConvertNewToOld(page);
    1409             :         DCHECK(!new_page->InYoungGeneration());
    1410             :         new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
    1411             :         break;
    1412             :       }
    1413             :     }
    1414        2481 :   }
    1415             : 
    1416     2919806 :   inline bool Visit(HeapObject object, int size) override {
    1417             :     if (mode == NEW_TO_NEW) {
    1418     5839612 :       heap_->UpdateAllocationSite(object->map(), object,
    1419             :                                   local_pretenuring_feedback_);
    1420             :     } else if (mode == NEW_TO_OLD) {
    1421     5809055 :       object->IterateBodyFast(record_visitor_);
    1422             :     }
    1423     2924452 :     return true;
    1424             :   }
    1425             : 
    1426             :   intptr_t moved_bytes() { return moved_bytes_; }
    1427        3510 :   void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
    1428             : 
    1429             :  private:
    1430             :   Heap* heap_;
    1431             :   RecordMigratedSlotVisitor* record_visitor_;
    1432             :   intptr_t moved_bytes_;
    1433             :   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
    1434             : };
    1435             : 
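                     : // EvacuateNewSpacePageVisitor supports whole-page promotion: rather than
                     : // copying each live object, Move() re-labels the page itself. In
                     : // NEW_TO_OLD mode the page leaves the young generation, so Visit()
                     : // re-scans every object body with the RecordMigratedSlotVisitor to rebuild
                     : // remembered sets; in NEW_TO_NEW mode objects stay young and only
                     : // pretenuring feedback is gathered.
                     :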
    1436      154228 : class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
    1437             :  public:
    1438             :   EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
    1439             :                           RecordMigratedSlotVisitor* record_visitor)
    1440       77114 :       : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
    1441             : 
    1442    20518867 :   inline bool Visit(HeapObject object, int size) override {
    1443    20518867 :     HeapObject target_object;
    1444    20518867 :     if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
    1445             :                           object, size, &target_object)) {
    1446             :       DCHECK(object->map_word().IsForwardingAddress());
    1447             :       return true;
    1448             :     }
    1449          25 :     return false;
    1450             :   }
    1451             : };
    1452             : 
    1453          50 : class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
    1454             :  public:
    1455          25 :   explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
    1456             : 
    1457           0 :   inline bool Visit(HeapObject object, int size) override {
    1458          65 :     RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
    1459          65 :     object->IterateBodyFast(&visitor);
    1460           0 :     return true;
    1461             :   }
    1462             : 
    1463             :  private:
    1464             :   Heap* heap_;
    1465             : };
    1466             : 
    1467    10563168 : bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
    1468             :   Object o = *p;
    1469    10563168 :   if (!o->IsHeapObject()) return false;
    1470             :   HeapObject heap_object = HeapObject::cast(o);
    1471             :   return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
    1472    10563168 :       heap_object);
    1473             : }
    1474             : 
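                     : // IsUnmarkedHeapObject() is the liveness predicate handed to the global
                     : // handle machinery below: an object that is still white after marking has
                     : // completed is unreachable, so its weak handles may be finalized or reset.
                     :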
    1475       73955 : void MarkCompactCollector::MarkStringTable(
    1476             :     ObjectVisitor* custom_root_body_visitor) {
    1477       73955 :   StringTable string_table = heap()->string_table();
    1478             :   // Mark the string table itself.
    1479       73955 :   if (marking_state()->WhiteToBlack(string_table)) {
    1480             :     // Explicitly mark the prefix.
    1481       73379 :     string_table->IteratePrefix(custom_root_body_visitor);
    1482             :   }
    1483       73955 : }
    1484             : 
    1485       73955 : void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
    1486             :                                      ObjectVisitor* custom_root_body_visitor) {
    1487             :   // Mark the heap roots including global variables, stack variables,
    1488             :   // etc., and all objects reachable from them.
    1489       73955 :   heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);
    1490             : 
    1491             :   // Custom marking for string table and top optimized frame.
    1492       73955 :   MarkStringTable(custom_root_body_visitor);
    1493       73955 :   ProcessTopOptimizedFrame(custom_root_body_visitor);
    1494       73955 : }
    1495             : 
    1496      147910 : void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
    1497             :   bool work_to_do = true;
    1498             :   int iterations = 0;
    1499      147910 :   int max_iterations = FLAG_ephemeron_fixpoint_iterations;
    1500             : 
    1501      443808 :   while (work_to_do) {
    1502      147949 :     PerformWrapperTracing();
    1503             : 
    1504      147949 :     if (iterations >= max_iterations) {
    1505             :       // Give up fixpoint iteration and switch to the linear algorithm.
    1506           0 :       ProcessEphemeronsLinear();
    1507           0 :       break;
    1508             :     }
    1509             : 
    1510             :     // Move ephemerons from next_ephemerons into current_ephemerons to
    1511             :     // drain them in this iteration.
    1512      147949 :     weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
    1513             :     heap()->concurrent_marking()->set_ephemeron_marked(false);
    1514             : 
    1515             :     {
    1516      591796 :       TRACE_GC(heap()->tracer(),
    1517             :                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
    1518             : 
    1519      147949 :       if (FLAG_parallel_marking) {
    1520      292854 :         heap_->concurrent_marking()->RescheduleTasksIfNeeded();
    1521             :       }
    1522             : 
    1523      147949 :       work_to_do = ProcessEphemerons();
    1524             :       FinishConcurrentMarking(
    1525      147949 :           ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
    1526             :     }
    1527             : 
    1528      147949 :     CHECK(weak_objects_.current_ephemerons.IsEmpty());
    1529      147949 :     CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
    1530             : 
    1531      295868 :     work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
    1532      147910 :                  heap()->concurrent_marking()->ephemeron_marked() ||
    1533      443769 :                  !marking_worklist()->IsEmbedderEmpty() ||
    1534      147910 :                  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
    1535      147949 :     ++iterations;
    1536             :   }
    1537             : 
    1538      147910 :   CHECK(marking_worklist()->IsEmpty());
    1539      147910 :   CHECK(weak_objects_.current_ephemerons.IsEmpty());
    1540      147910 :   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
    1541      147910 : }
    1542             : 
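                     : // Ephemeron semantics: for a WeakMap entry {key -> value}, the value must
                     : // be treated as live only once the key is live, so marking one object can
                     : // retroactively make deferred values reachable. The loop above therefore
                     : // iterates to a fixpoint. As a worked example, take entries {a -> b} and
                     : // {b -> c} with only a reachable from the roots: the first pass marks b
                     : // (a is black), the second pass marks c (b is now black), and the third
                     : // pass finds no new work and terminates.
                     :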
    1543      147949 : bool MarkCompactCollector::ProcessEphemerons() {
    1544      147949 :   Ephemeron ephemeron;
    1545             :   bool ephemeron_marked = false;
    1546             : 
    1547             :   // Drain current_ephemerons and push ephemerons where key and value are still
    1548             :   // unreachable into next_ephemerons.
    1549      147982 :   while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
    1550          33 :     if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
    1551             :       ephemeron_marked = true;
    1552             :     }
    1553             :   }
    1554             : 
    1555             :   // Drain marking worklist and push discovered ephemerons into
    1556             :   // discovered_ephemerons.
    1557             :   ProcessMarkingWorklist();
    1558             : 
    1559             :   // Drain discovered_ephemerons (filled while draining the marking worklist
    1560             :   // above) and push ephemerons where key and value are still unreachable into
    1561             :   // next_ephemerons.
    1562      148055 :   while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
    1563         106 :     if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
    1564             :       ephemeron_marked = true;
    1565             :     }
    1566             :   }
    1567             : 
    1568             :   // Flush local ephemerons for main task to global pool.
    1569      147949 :   weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThread);
    1570      147949 :   weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
    1571             : 
    1572      147949 :   return ephemeron_marked;
    1573             : }
    1574             : 
    1575           0 : void MarkCompactCollector::ProcessEphemeronsLinear() {
    1576           0 :   TRACE_GC(heap()->tracer(),
    1577             :            GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
    1578           0 :   CHECK(heap()->concurrent_marking()->IsStopped());
    1579             :   std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
    1580           0 :   Ephemeron ephemeron;
    1581             : 
    1582             :   DCHECK(weak_objects_.current_ephemerons.IsEmpty());
    1583           0 :   weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
    1584             : 
    1585           0 :   while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
    1586           0 :     ProcessEphemeron(ephemeron.key, ephemeron.value);
    1587             : 
    1588           0 :     if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
    1589           0 :       key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
    1590             :     }
    1591             :   }
    1592             : 
    1593           0 :   ephemeron_marking_.newly_discovered_limit = key_to_values.size();
    1594             :   bool work_to_do = true;
    1595             : 
    1596           0 :   while (work_to_do) {
    1597           0 :     PerformWrapperTracing();
    1598             : 
    1599             :     ResetNewlyDiscovered();
    1600           0 :     ephemeron_marking_.newly_discovered_limit = key_to_values.size();
    1601             : 
    1602             :     {
    1603           0 :       TRACE_GC(heap()->tracer(),
    1604             :                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
    1605             :       // Drain marking worklist and push all discovered objects into
    1606             :       // newly_discovered.
    1607             :       ProcessMarkingWorklistInternal<
    1608             :           MarkCompactCollector::MarkingWorklistProcessingMode::
    1609           0 :               kTrackNewlyDiscoveredObjects>();
    1610             :     }
    1611             : 
    1612           0 :     while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
    1613           0 :       ProcessEphemeron(ephemeron.key, ephemeron.value);
    1614             : 
    1615           0 :       if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
    1616           0 :         key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
    1617             :       }
    1618             :     }
    1619             : 
    1620           0 :     if (ephemeron_marking_.newly_discovered_overflowed) {
    1621             :       // If newly_discovered overflowed, just revisit all ephemerons in
    1622             :       // next_ephemerons.
    1623           0 :       weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
    1624           0 :         if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
    1625             :             non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
    1626             :           marking_worklist()->Push(ephemeron.value);
    1627             :         }
    1628           0 :       });
    1629             : 
    1630             :     } else {
    1631             :       // This is the good case: newly_discovered stores all discovered
    1632             :       // objects. Now use key_to_values to see if discovered objects keep more
    1633             :       // objects alive due to ephemeron semantics.
    1634           0 :       for (HeapObject object : ephemeron_marking_.newly_discovered) {
    1635             :         auto range = key_to_values.equal_range(object);
    1636           0 :         for (auto it = range.first; it != range.second; ++it) {
    1637           0 :           HeapObject value = it->second;
    1638             :           MarkObject(object, value);
    1639             :         }
    1640             :       }
    1641             :     }
    1642             : 
    1643             :     // Do NOT drain the marking worklist here; otherwise the checks on
    1644             :     // work_to_do below are not sufficient to determine whether another
    1645             :     // iteration is necessary.
    1646             : 
    1647           0 :     work_to_do = !marking_worklist()->IsEmpty() ||
    1648           0 :                  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
    1649           0 :     CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
    1650             :   }
    1651             : 
    1652             :   ResetNewlyDiscovered();
    1653             :   ephemeron_marking_.newly_discovered.shrink_to_fit();
    1654             : 
    1655           0 :   CHECK(marking_worklist()->IsEmpty());
    1656           0 : }
    1657             : 
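                     : // The linear fallback above bounds the worst case: long dependency chains
                     : // can force the fixpoint loop into discovering one new key per pass.
                     : // Instead, it indexes the unresolved entries in the key_to_values multimap
                     : // and, as each object is newly marked, immediately marks the values keyed
                     : // by it, so each ephemeron entry has to be inspected only once.
                     :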
    1658      221904 : void MarkCompactCollector::PerformWrapperTracing() {
    1659      443808 :   if (heap_->local_embedder_heap_tracer()->InUse()) {
    1660         540 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
    1661             :     {
    1662             :       LocalEmbedderHeapTracer::ProcessingScope scope(
    1663         405 :           heap_->local_embedder_heap_tracer());
    1664         135 :       HeapObject object;
    1665         205 :       while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
    1666          35 :         scope.TracePossibleWrapper(JSObject::cast(object));
    1667             :       }
    1668             :     }
    1669         135 :     heap_->local_embedder_heap_tracer()->Trace(
    1670         135 :         std::numeric_limits<double>::infinity());
    1671             :   }
    1672      221904 : }
    1673             : 
    1674           0 : void MarkCompactCollector::ProcessMarkingWorklist() {
    1675             :   ProcessMarkingWorklistInternal<
    1676      517724 :       MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>();
    1677           0 : }
    1678             : 
    1679             : template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
    1680      517723 : void MarkCompactCollector::ProcessMarkingWorklistInternal() {
    1681             :   HeapObject object;
    1682             :   MarkCompactMarkingVisitor visitor(this, marking_state());
    1683   330947059 :   while (!(object = marking_worklist()->Pop()).is_null()) {
    1684             :     DCHECK(!object->IsFiller());
    1685             :     DCHECK(object->IsHeapObject());
    1686             :     DCHECK(heap()->Contains(object));
    1687             :     DCHECK(!(marking_state()->IsWhite(object)));
    1688             :     marking_state()->GreyToBlack(object);
    1689             :     if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
    1690             :                     kTrackNewlyDiscoveredObjects) {
    1691           0 :       AddNewlyDiscovered(object);
    1692             :     }
    1693             :     Map map = object->map();
    1694             :     MarkObject(object, map);
    1695             :     visitor.Visit(map, object);
    1696             :   }
    1697      517723 : }
    1698             : 
    1699         139 : bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
    1700         139 :   if (marking_state()->IsBlackOrGrey(key)) {
    1701          34 :     if (marking_state()->WhiteToGrey(value)) {
    1702             :       marking_worklist()->Push(value);
    1703          29 :       return true;
    1704             :     }
    1705             : 
    1706         105 :   } else if (marking_state()->IsWhite(value)) {
    1707         105 :     weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value});
    1708             :   }
    1709             : 
    1710             :   return false;
    1711             : }
    1712             : 
    1713      147910 : void MarkCompactCollector::ProcessEphemeronMarking() {
    1714             :   DCHECK(marking_worklist()->IsEmpty());
    1715             : 
    1716             :   // Incremental marking might leave ephemerons in the main task's local
    1717             :   // buffer; flush them into the global pool.
    1718      147910 :   weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
    1719             : 
    1720      147910 :   ProcessEphemeronsUntilFixpoint();
    1721             : 
    1722      147910 :   CHECK(marking_worklist()->IsEmpty());
    1723      147910 :   CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
    1724      147910 : }
    1725             : 
    1726       73955 : void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
    1727      157969 :   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
    1728       84014 :        !it.done(); it.Advance()) {
    1729      128296 :     if (it.frame()->type() == StackFrame::INTERPRETED) {
    1730             :       return;
    1731             :     }
    1732       91505 :     if (it.frame()->type() == StackFrame::OPTIMIZED) {
    1733        7491 :       Code code = it.frame()->LookupCode();
    1734        7491 :       if (!code->CanDeoptAt(it.frame()->pc())) {
    1735        1049 :         Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
    1736             :       }
    1737             :       return;
    1738             :     }
    1739             :   }
    1740             : }
    1741             : 
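                     : // The top optimized frame needs this custom treatment because its Code may
                     : // be unable to deoptimize at the current pc; in that case the code object
                     : // keeps executing, so the objects embedded in it are treated as strong
                     : // roots and its body is iterated with the custom root body visitor.
                     :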
    1742       73955 : void MarkCompactCollector::RecordObjectStats() {
    1743       73955 :   if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
    1744           0 :     heap()->CreateObjectStats();
    1745             :     ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
    1746             :                                    heap()->dead_object_stats_.get());
    1747           0 :     collector.Collect();
    1748           0 :     if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
    1749             :                     v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
    1750           0 :       std::stringstream live, dead;
    1751           0 :       heap()->live_object_stats_->Dump(live);
    1752           0 :       heap()->dead_object_stats_->Dump(dead);
    1753           0 :       TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
    1754             :                            "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
    1755             :                            "live", TRACE_STR_COPY(live.str().c_str()), "dead",
    1756             :                            TRACE_STR_COPY(dead.str().c_str()));
    1757             :     }
    1758           0 :     if (FLAG_trace_gc_object_stats) {
    1759           0 :       heap()->live_object_stats_->PrintJSON("live");
    1760           0 :       heap()->dead_object_stats_->PrintJSON("dead");
    1761             :     }
    1762           0 :     heap()->live_object_stats_->CheckpointObjectStats();
    1763           0 :     heap()->dead_object_stats_->ClearObjectStats();
    1764             :   }
    1765       73955 : }
    1766             : 
    1767       73955 : void MarkCompactCollector::MarkLiveObjects() {
    1768      295820 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
    1769             :   // The recursive GC marker detects when it is nearing stack overflow
    1770             :   // and switches to a different marking system. JS interrupts interfere
    1771             :   // with the C stack limit check.
    1772             :   PostponeInterruptsScope postpone(isolate());
    1773             : 
    1774             :   {
    1775      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
    1776       73955 :     IncrementalMarking* incremental_marking = heap_->incremental_marking();
    1777       73955 :     if (was_marked_incrementally_) {
    1778       21388 :       incremental_marking->Finalize();
    1779             :     } else {
    1780       52567 :       CHECK(incremental_marking->IsStopped());
    1781             :     }
    1782             :   }
    1783             : 
    1784             : #ifdef DEBUG
    1785             :   DCHECK(state_ == PREPARE_GC);
    1786             :   state_ = MARK_LIVE_OBJECTS;
    1787             : #endif
    1788             : 
    1789      147910 :   heap_->local_embedder_heap_tracer()->EnterFinalPause();
    1790             : 
    1791             :   RootMarkingVisitor root_visitor(this);
    1792             : 
    1793             :   {
    1794      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
    1795             :     CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
    1796       73955 :     MarkRoots(&root_visitor, &custom_root_body_visitor);
    1797             :   }
    1798             : 
    1799             :   {
    1800      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
    1801       73955 :     if (FLAG_parallel_marking) {
    1802      146388 :       heap_->concurrent_marking()->RescheduleTasksIfNeeded();
    1803             :     }
    1804             :     ProcessMarkingWorklist();
    1805             : 
    1806             :     FinishConcurrentMarking(
    1807       73955 :         ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
    1808             :     ProcessMarkingWorklist();
    1809             :   }
    1810             : 
    1811             :   {
    1812      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
    1813             : 
    1814             :     DCHECK(marking_worklist()->IsEmpty());
    1815             : 
    1816             :     // Mark objects reachable through the embedder heap. This phase is
    1817             :     // opportunistic as it may not discover graphs that are only reachable
    1818             :     // through ephemerons.
    1819             :     {
    1820      295820 :       TRACE_GC(heap()->tracer(),
    1821             :                GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
    1822       73955 :       do {
    1823             :         // PerformWrapperTracing() also empties the work items collected by
    1824             :         // concurrent markers. As a result, this call needs to happen at
    1825             :         // least once.
    1826       73955 :         PerformWrapperTracing();
    1827             :         ProcessMarkingWorklist();
    1828      221865 :       } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
    1829       73955 :                !marking_worklist()->IsEmbedderEmpty());
    1830             :       DCHECK(marking_worklist()->IsEmbedderEmpty());
    1831             :       DCHECK(marking_worklist()->IsEmpty());
    1832             :     }
    1833             : 
    1834             :     // The objects reachable from the roots are marked, while unreachable
    1835             :     // objects remain unmarked. Mark objects reachable due to embedder heap
    1836             :     // tracing or harmony weak maps.
    1837             :     {
    1838      295820 :       TRACE_GC(heap()->tracer(),
    1839             :                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
    1840       73955 :       ProcessEphemeronMarking();
    1841             :       DCHECK(marking_worklist()->IsEmpty());
    1842             :     }
    1843             : 
    1844             :     // The objects reachable from the roots, weak maps, and embedder heap
    1845             :     // tracing are marked. Objects pointed to only by weak global handles cannot
    1846             :     // be immediately reclaimed. Instead, we have to mark them as pending and
    1847             :     // mark objects reachable from them.
    1848             :     //
    1849             :     // First we identify nonlive weak handles and mark them as pending
    1850             :     // destruction.
    1851             :     {
    1852      295820 :       TRACE_GC(heap()->tracer(),
    1853             :                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
    1854             :       heap()->isolate()->global_handles()->IterateWeakRootsIdentifyFinalizers(
    1855       73955 :           &IsUnmarkedHeapObject);
    1856             :       ProcessMarkingWorklist();
    1857             :     }
    1858             : 
    1859             :     // Process finalizers, effectively keeping them alive until the next
    1860             :     // garbage collection.
    1861             :     {
    1862      295820 :       TRACE_GC(heap()->tracer(),
    1863             :                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
    1864             :       heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
    1865       73955 :           &root_visitor);
    1866             :       ProcessMarkingWorklist();
    1867             :     }
    1868             : 
    1869             :     // Repeat ephemeron processing from the newly marked objects.
    1870             :     {
    1871      295820 :       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
    1872       73955 :       ProcessEphemeronMarking();
    1873             :       DCHECK(marking_worklist()->IsEmbedderEmpty());
    1874             :       DCHECK(marking_worklist()->IsEmpty());
    1875             :     }
    1876             : 
    1877             :     {
    1878             :       heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
    1879       73955 :           &IsUnmarkedHeapObject);
    1880             :     }
    1881             :   }
    1882             : 
    1883       73955 :   if (was_marked_incrementally_) {
    1884       21388 :     heap()->incremental_marking()->Deactivate();
    1885             :   }
    1886       73955 : }
    1887             : 
    1888       73955 : void MarkCompactCollector::ClearNonLiveReferences() {
    1889      295820 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
    1890             : 
    1891             :   {
    1892      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
    1893             : 
    1894             :     // Prune the string table, removing all strings only pointed to by the
    1895             :     // string table. Cannot use string_table() here because the string
    1896             :     // table is marked.
    1897       73955 :     StringTable string_table = heap()->string_table();
    1898             :     InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
    1899       73955 :     string_table->IterateElements(&internalized_visitor);
    1900       73955 :     string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
    1901             : 
    1902             :     ExternalStringTableCleaner external_visitor(heap());
    1903       73955 :     heap()->external_string_table_.IterateAll(&external_visitor);
    1904       73955 :     heap()->external_string_table_.CleanUpAll();
    1905             :   }
    1906             : 
    1907             :   {
    1908      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
    1909       73955 :     ClearOldBytecodeCandidates();
    1910             :   }
    1911             : 
    1912             :   {
    1913      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
    1914       73955 :     ClearFlushedJsFunctions();
    1915             :   }
    1916             : 
    1917             :   {
    1918      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
    1919             :     // Process the weak references.
    1920             :     MarkCompactWeakObjectRetainer mark_compact_object_retainer(
    1921             :         non_atomic_marking_state());
    1922       73955 :     heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
    1923             :   }
    1924             : 
    1925             :   {
    1926      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
    1927             :     // ClearFullMapTransitions must be called before weak references are
    1928             :     // cleared.
    1929       73955 :     ClearFullMapTransitions();
    1930             :   }
    1931             :   {
    1932      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
    1933       73955 :     ClearWeakReferences();
    1934       73955 :     ClearWeakCollections();
    1935       73955 :     ClearJSWeakRefs();
    1936             :   }
    1937             : 
    1938       73955 :   MarkDependentCodeForDeoptimization();
    1939             : 
    1940             :   DCHECK(weak_objects_.transition_arrays.IsEmpty());
    1941             :   DCHECK(weak_objects_.weak_references.IsEmpty());
    1942             :   DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
    1943             :   DCHECK(weak_objects_.js_weak_refs.IsEmpty());
    1944             :   DCHECK(weak_objects_.weak_cells.IsEmpty());
    1945             :   DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
    1946             :   DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
    1947       73955 : }
    1948             : 
    1949       73955 : void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
    1950       73955 :   std::pair<HeapObject, Code> weak_object_in_code;
    1951      175695 :   while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
    1952             :                                                 &weak_object_in_code)) {
    1953       50870 :     HeapObject object = weak_object_in_code.first;
    1954       50870 :     Code code = weak_object_in_code.second;
    1955       51710 :     if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
    1956         840 :         !code->embedded_objects_cleared()) {
    1957         278 :       if (!code->marked_for_deoptimization()) {
    1958         112 :         code->SetMarkedForDeoptimization("weak objects");
    1959         112 :         have_code_to_deoptimize_ = true;
    1960             :       }
    1961         278 :       code->ClearEmbeddedObjects(heap_);
    1962             :       DCHECK(code->embedded_objects_cleared());
    1963             :     }
    1964             :   }
    1965       73955 : }
    1966             : 
    1967      390825 : void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
    1968             :   DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
    1969             :   Object potential_parent = dead_target->constructor_or_backpointer();
    1970      390825 :   if (potential_parent->IsMap()) {
    1971             :     Map parent = Map::cast(potential_parent);
    1972             :     DisallowHeapAllocation no_gc_obviously;
    1973      967791 :     if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
    1974             :         TransitionsAccessor(isolate(), parent, &no_gc_obviously)
    1975      479689 :             .HasSimpleTransitionTo(dead_target)) {
    1976       17540 :       ClearPotentialSimpleMapTransition(parent, dead_target);
    1977             :     }
    1978             :   }
    1979      390825 : }
    1980             : 
    1981       17540 : void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
    1982             :                                                              Map dead_target) {
    1983             :   DCHECK(!map->is_prototype_map());
    1984             :   DCHECK(!dead_target->is_prototype_map());
    1985             :   DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
    1986             :   // Take ownership of the descriptor array.
    1987             :   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
    1988             :   DescriptorArray descriptors = map->instance_descriptors();
    1989       17540 :   if (descriptors == dead_target->instance_descriptors() &&
    1990             :       number_of_own_descriptors > 0) {
    1991        3779 :     TrimDescriptorArray(map, descriptors);
    1992             :     DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
    1993             :   }
    1994       17540 : }
    1995             : 
    1996      108842 : void MarkCompactCollector::FlushBytecodeFromSFI(
    1997             :     SharedFunctionInfo shared_info) {
    1998             :   DCHECK(shared_info->HasBytecodeArray());
    1999             : 
    2000             :   // Retain objects required for uncompiled data.
    2001      108842 :   String inferred_name = shared_info->inferred_name();
    2002      108842 :   int start_position = shared_info->StartPosition();
    2003      108842 :   int end_position = shared_info->EndPosition();
    2004             : 
    2005      108842 :   shared_info->DiscardCompiledMetadata(
    2006      108842 :       isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
    2007             :         RecordSlot(object, slot, target);
    2008      217684 :       });
    2009             : 
    2010             :   // The size of the bytecode array should always be at least as large as an
    2011             :   // UncompiledData object.
    2012             :   STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
    2013             :                 UncompiledDataWithoutPreparseData::kSize);
    2014             : 
    2015             :   // Replace bytecode array with an uncompiled data array.
    2016      108842 :   HeapObject compiled_data = shared_info->GetBytecodeArray();
    2017             :   Address compiled_data_start = compiled_data->address();
    2018      108842 :   int compiled_data_size = compiled_data->Size();
    2019             :   MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
    2020             : 
    2021             :   // Clear any recorded slots for the compiled data, as they are now invalid.
    2022      108842 :   RememberedSet<OLD_TO_NEW>::RemoveRange(
    2023             :       chunk, compiled_data_start, compiled_data_start + compiled_data_size,
    2024      108842 :       SlotSet::PREFREE_EMPTY_BUCKETS);
    2025             :   RememberedSet<OLD_TO_OLD>::RemoveRange(
    2026             :       chunk, compiled_data_start, compiled_data_start + compiled_data_size,
    2027      108842 :       SlotSet::PREFREE_EMPTY_BUCKETS);
    2028             : 
    2029             :   // Swap the map, using set_map_after_allocation to skip the heap verification
    2030             :   // checks, which are unnecessary since we do this during the GC atomic pause.
    2031             :   compiled_data->set_map_after_allocation(
    2032             :       ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
    2033             :       SKIP_WRITE_BARRIER);
    2034             : 
    2035             :   // Create a filler object for any leftover space in the bytecode array.
    2036      108842 :   if (!heap()->IsLargeObject(compiled_data)) {
    2037             :     heap()->CreateFillerObjectAt(
    2038             :         compiled_data->address() + UncompiledDataWithoutPreparseData::kSize,
    2039             :         compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
    2040      217684 :         ClearRecordedSlots::kNo);
    2041             :   }
    2042             : 
    2043             :   // Initialize the uncompiled data.
    2044             :   UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
    2045      108842 :   UncompiledData::Initialize(
    2046             :       uncompiled_data, inferred_name, start_position, end_position,
    2047             :       kFunctionLiteralIdInvalid,
    2048      108842 :       [](HeapObject object, ObjectSlot slot, HeapObject target) {
    2049             :         RecordSlot(object, slot, target);
    2050      217684 :       });
    2051             : 
    2052             :   // Mark the uncompiled data as black, and ensure all fields have already been
    2053             :   // marked.
    2054             :   DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
    2055             :   non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
    2056             : 
    2057             :   // Use the raw function data setter to avoid validity checks, since we're
    2058             :   // performing the unusual task of decompiling.
    2059      108842 :   shared_info->set_function_data(uncompiled_data);
    2060             :   DCHECK(!shared_info->is_compiled());
    2061      108842 : }
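
// A minimal, self-contained analogy (plain C++; not V8's object model) for
// the in-place replacement above: a record's storage is reused for a smaller
// record at the same address, and the unused tail is stamped as an explicit
// filler record so a linear scan of the buffer still sees back-to-back valid
// records, much like CreateFillerObjectAt keeps the heap iterable.
#include <cassert>

struct RecordHeader { int size_in_bytes; };  // every record starts with its size

void ShrinkRecordInPlace(char* record, int old_size, int new_size) {
  assert(new_size >= static_cast<int>(sizeof(RecordHeader)));
  assert(new_size <= old_size);
  reinterpret_cast<RecordHeader*>(record)->size_in_bytes = new_size;
  int tail = old_size - new_size;
  if (tail >= static_cast<int>(sizeof(RecordHeader))) {
    // The leftover tail becomes a standalone filler record.
    reinterpret_cast<RecordHeader*>(record + new_size)->size_in_bytes = tail;
  }
}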
    2062             : 
    2063       73955 : void MarkCompactCollector::ClearOldBytecodeCandidates() {
    2064             :   DCHECK(FLAG_flush_bytecode ||
    2065             :          weak_objects_.bytecode_flushing_candidates.IsEmpty());
    2066       73955 :   SharedFunctionInfo flushing_candidate;
    2067      413764 :   while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThread,
    2068             :                                                         &flushing_candidate)) {
    2069             :     // If the BytecodeArray is dead, flush it, which will replace the field with
    2070             :     // an uncompiled data object.
    2071      339809 :     if (!non_atomic_marking_state()->IsBlackOrGrey(
    2072      679618 :             flushing_candidate->GetBytecodeArray())) {
    2073      108842 :       FlushBytecodeFromSFI(flushing_candidate);
    2074             :     }
    2075             : 
    2076             :     // Now record the slot, which either has been updated to point at uncompiled
    2077             :     // data or still holds the live BytecodeArray.
    2078             :     ObjectSlot slot =
    2079             :         flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
    2080             :     RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
    2081             :   }
    2082       73955 : }
    2083             : 
    2084       73955 : void MarkCompactCollector::ClearFlushedJsFunctions() {
    2085             :   DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
    2086       73955 :   JSFunction flushed_js_function;
    2087       88409 :   while (weak_objects_.flushed_js_functions.Pop(kMainThread,
    2088             :                                                 &flushed_js_function)) {
    2089        7227 :     flushed_js_function->ResetIfBytecodeFlushed();
    2090             :   }
    2091       73955 : }
    2092             : 
    2093       73955 : void MarkCompactCollector::ClearFullMapTransitions() {
    2094       73955 :   TransitionArray array;
    2095      798754 :   while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
    2096             :     int num_transitions = array->number_of_entries();
    2097      724799 :     if (num_transitions > 0) {
    2098      548592 :       Map map;
    2099             :       // The array might contain "undefined" elements because it's not yet
    2100             :       // filled. Allow it.
    2101      548592 :       if (array->GetTargetIfExists(0, isolate(), &map)) {
    2102             :         DCHECK(!map.is_null());  // Weak pointers aren't cleared yet.
    2103             :         Map parent = Map::cast(map->constructor_or_backpointer());
    2104             :         bool parent_is_alive =
    2105             :             non_atomic_marking_state()->IsBlackOrGrey(parent);
    2106             :         DescriptorArray descriptors = parent_is_alive
    2107             :                                           ? parent->instance_descriptors()
    2108     1097184 :                                           : DescriptorArray();
    2109             :         bool descriptors_owner_died =
    2110      548592 :             CompactTransitionArray(parent, array, descriptors);
    2111      548592 :         if (descriptors_owner_died) {
    2112        2598 :           TrimDescriptorArray(parent, descriptors);
    2113             :         }
    2114             :       }
    2115             :     }
    2116             :   }
    2117       73955 : }
    2118             : 
    2119      548592 : bool MarkCompactCollector::CompactTransitionArray(Map map,
    2120             :                                                   TransitionArray transitions,
    2121             :                                                   DescriptorArray descriptors) {
    2122             :   DCHECK(!map->is_prototype_map());
    2123             :   int num_transitions = transitions->number_of_entries();
    2124             :   bool descriptors_owner_died = false;
    2125             :   int transition_index = 0;
    2126             :   // Compact all live transitions to the left.
    2127     2058604 :   for (int i = 0; i < num_transitions; ++i) {
    2128             :     Map target = transitions->GetTarget(i);
    2129             :     DCHECK_EQ(target->constructor_or_backpointer(), map);
    2130      755006 :     if (non_atomic_marking_state()->IsWhite(target)) {
    2131      143500 :       if (!descriptors.is_null() &&
    2132             :           target->instance_descriptors() == descriptors) {
    2133             :         DCHECK(!target->is_prototype_map());
    2134             :         descriptors_owner_died = true;
    2135             :       }
    2136             :     } else {
    2137      683256 :       if (i != transition_index) {
    2138       22630 :         Name key = transitions->GetKey(i);
    2139             :         transitions->SetKey(transition_index, key);
    2140             :         HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
    2141             :         RecordSlot(transitions, key_slot, key);
    2142       22630 :         MaybeObject raw_target = transitions->GetRawTarget(i);
    2143             :         transitions->SetRawTarget(transition_index, raw_target);
    2144             :         HeapObjectSlot target_slot =
    2145             :             transitions->GetTargetSlot(transition_index);
    2146             :         RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
    2147             :       }
    2148      683256 :       transition_index++;
    2149             :     }
    2150             :   }
    2151             :   // If there are no transitions to be cleared, return.
    2152      548592 :   if (transition_index == num_transitions) {
    2153             :     DCHECK(!descriptors_owner_died);
    2154             :     return false;
    2155             :   }
    2156             :   // Note that we never eliminate a transition array, though we might right-trim
    2157             :   // such that number_of_transitions() == 0. If this assumption changes,
    2158             :   // TransitionArray::Insert() will need to deal with the case that a transition
    2159             :   // array disappeared during GC.
    2160       17448 :   int trim = transitions->Capacity() - transition_index;
    2161       17448 :   if (trim > 0) {
    2162       17448 :     heap_->RightTrimWeakFixedArray(transitions,
    2163       17448 :                                    trim * TransitionArray::kEntrySize);
    2164             :     transitions->SetNumberOfTransitions(transition_index);
    2165             :   }
    2166             :   return descriptors_owner_died;
    2167             : }
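
// A minimal sketch (std::vector instead of a TransitionArray) of the
// compaction loop above: live entries are copied left over dead ones,
// preserving order, and the array is then right-trimmed to the number of
// survivors; std::remove_if followed by erase implements the same idiom.
#include <cstddef>
#include <vector>

template <typename T, typename IsDead>
std::size_t CompactLeft(std::vector<T>* entries, IsDead is_dead) {
  std::size_t live_index = 0;
  for (std::size_t i = 0; i < entries->size(); ++i) {
    if (!is_dead((*entries)[i])) {
      if (i != live_index) (*entries)[live_index] = (*entries)[i];
      ++live_index;
    }
  }
  entries->resize(live_index);  // analogous to RightTrimWeakFixedArray
  return live_index;
}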
    2168             : 
    2169        5547 : void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
    2170             :                                                     int descriptors_to_trim) {
    2171        5547 :   int old_nof_all_descriptors = array->number_of_all_descriptors();
    2172        5547 :   int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
    2173             :   DCHECK_LT(0, descriptors_to_trim);
    2174             :   DCHECK_LE(0, new_nof_all_descriptors);
    2175             :   Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
    2176             :   Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
    2177             :   RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
    2178             :                                          start, end,
    2179        5547 :                                          SlotSet::PREFREE_EMPTY_BUCKETS);
    2180             :   RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
    2181             :                                          start, end,
    2182        5547 :                                          SlotSet::PREFREE_EMPTY_BUCKETS);
    2183             :   heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
    2184        5547 :                                ClearRecordedSlots::kNo);
    2185             :   array->set_number_of_all_descriptors(new_nof_all_descriptors);
    2186        5547 : }
    2187             : 
    2188        6377 : void MarkCompactCollector::TrimDescriptorArray(Map map,
    2189             :                                                DescriptorArray descriptors) {
    2190             :   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
    2191        6377 :   if (number_of_own_descriptors == 0) {
    2192             :     DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
    2193             :     return;
    2194             :   }
    2195             :   // TODO(ulan): Trim only if slack is greater than some percentage threshold.
    2196             :   int to_trim =
    2197        6324 :       descriptors->number_of_all_descriptors() - number_of_own_descriptors;
    2198        6324 :   if (to_trim > 0) {
    2199             :     descriptors->set_number_of_descriptors(number_of_own_descriptors);
    2200        5547 :     RightTrimDescriptorArray(descriptors, to_trim);
    2201             : 
    2202        5547 :     TrimEnumCache(map, descriptors);
    2203        5547 :     descriptors->Sort();
    2204             : 
    2205             :     if (FLAG_unbox_double_fields) {
    2206             :       LayoutDescriptor layout_descriptor = map->layout_descriptor();
    2207             :       layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
    2208             :                                                   number_of_own_descriptors);
    2209             :       SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
    2210             :     }
    2211             :   }
    2212             :   DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
    2213        6324 :   map->set_owns_descriptors(true);
    2214             : }
    2215             : 
    2216        5547 : void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
    2217             :   int live_enum = map->EnumLength();
    2218        5547 :   if (live_enum == kInvalidEnumCacheSentinel) {
    2219        5503 :     live_enum = map->NumberOfEnumerableProperties();
    2220             :   }
    2221        5547 :   if (live_enum == 0) return descriptors->ClearEnumCache();
    2222             :   EnumCache enum_cache = descriptors->enum_cache();
    2223             : 
    2224             :   FixedArray keys = enum_cache->keys();
    2225        5504 :   int to_trim = keys->length() - live_enum;
    2226        5504 :   if (to_trim <= 0) return;
    2227          73 :   heap_->RightTrimFixedArray(keys, to_trim);
    2228             : 
    2229             :   FixedArray indices = enum_cache->indices();
    2230          73 :   to_trim = indices->length() - live_enum;
    2231          73 :   if (to_trim <= 0) return;
    2232          64 :   heap_->RightTrimFixedArray(indices, to_trim);
    2233             : }
    2234             : 
    2235       73955 : void MarkCompactCollector::ClearWeakCollections() {
    2236      295820 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
    2237       73955 :   EphemeronHashTable table;
    2238             : 
    2239       82057 :   while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
    2240       97846 :     for (int i = 0; i < table->Capacity(); i++) {
    2241       44872 :       HeapObject key = HeapObject::cast(table->KeyAt(i));
    2242             : #ifdef VERIFY_HEAP
    2243             :       Object value = table->ValueAt(i);
    2244             : 
    2245             :       if (value->IsHeapObject()) {
    2246             :         CHECK_IMPLIES(
    2247             :             non_atomic_marking_state()->IsBlackOrGrey(key),
    2248             :             non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
    2249             :       }
    2250             : #endif
    2251       44872 :       if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
    2252          88 :         table->RemoveEntry(i);
    2253             :       }
    2254             :     }
    2255             :   }
    2256       73955 : }
    2257             : 
    2258       73955 : void MarkCompactCollector::ClearWeakReferences() {
    2259      295820 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
    2260             :   std::pair<HeapObject, HeapObjectSlot> slot;
    2261             :   HeapObjectReference cleared_weak_ref =
    2262             :       HeapObjectReference::ClearedValue(isolate());
    2263    30260299 :   while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
    2264             :     HeapObject value;
    2265             :     // The slot could have been overwritten, so we have to treat it
    2266             :     // as a MaybeObjectSlot.
    2267             :     MaybeObjectSlot location(slot.second);
    2268    30186344 :     if ((*location)->GetHeapObjectIfWeak(&value)) {
    2269             :       DCHECK(!value->IsCell());
    2270    30166577 :       if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
    2271             :         // The value of the weak reference is alive.
    2272             :         RecordSlot(slot.first, HeapObjectSlot(location), value);
    2273             :       } else {
    2274     2878324 :         if (value->IsMap()) {
    2275             :           // The map is non-live.
    2276      390825 :           ClearPotentialSimpleMapTransition(Map::cast(value));
    2277             :         }
    2278             :         location.store(cleared_weak_ref);
    2279             :       }
    2280             :     }
    2281             :   }
    2282       73955 : }
    2283             : 
    2284       73955 : void MarkCompactCollector::ClearJSWeakRefs() {
    2285       73955 :   if (!FLAG_harmony_weak_refs) {
    2286       73550 :     return;
    2287             :   }
    2288         405 :   JSWeakRef weak_ref;
    2289         522 :   while (weak_objects_.js_weak_refs.Pop(kMainThread, &weak_ref)) {
    2290             :     HeapObject target = HeapObject::cast(weak_ref->target());
    2291         117 :     if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
    2292          74 :       weak_ref->set_target(ReadOnlyRoots(isolate()).undefined_value());
    2293             :     } else {
    2294             :       // The value of the JSWeakRef is alive.
    2295             :       ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
    2296             :       RecordSlot(weak_ref, slot, target);
    2297             :     }
    2298             :   }
    2299         405 :   WeakCell weak_cell;
    2300         687 :   while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
    2301             :     HeapObject target = HeapObject::cast(weak_cell->target());
    2302         282 :     if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
    2303             :       DCHECK(!target->IsUndefined());
    2304             :       // The value of the WeakCell is dead.
    2305             :       JSFinalizationGroup finalization_group =
    2306             :           JSFinalizationGroup::cast(weak_cell->finalization_group());
    2307         262 :       if (!finalization_group->scheduled_for_cleanup()) {
    2308         208 :         heap()->AddDirtyJSFinalizationGroup(
    2309             :             finalization_group,
    2310         208 :             [](HeapObject object, ObjectSlot slot, Object target) {
    2311         208 :               if (target->IsHeapObject()) {
    2312             :                 RecordSlot(object, slot, HeapObject::cast(target));
    2313             :               }
    2314         416 :             });
    2315             :       }
    2316             :       // We're modifying the pointers in WeakCell and JSFinalizationGroup during
    2317             :       // GC; thus we need to record the slots we write. The normal write
    2318             :       // barrier is not sufficient, since it is disabled before GC.
    2319         262 :       weak_cell->Nullify(isolate(),
    2320         850 :                          [](HeapObject object, ObjectSlot slot, Object target) {
    2321         850 :                            if (target->IsHeapObject()) {
    2322             :                              RecordSlot(object, slot, HeapObject::cast(target));
    2323             :                            }
    2324        1112 :                          });
    2325             :       DCHECK(finalization_group->NeedsCleanup());
    2326             :       DCHECK(finalization_group->scheduled_for_cleanup());
    2327             :     } else {
    2328             :       // The value of the WeakCell is alive.
    2329             :       ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
    2330             :       RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
    2331             :     }
    2332             :   }
    2333             : }
    2334             : 
    2335       61518 : void MarkCompactCollector::AbortWeakObjects() {
    2336             :   weak_objects_.transition_arrays.Clear();
    2337             :   weak_objects_.ephemeron_hash_tables.Clear();
    2338             :   weak_objects_.current_ephemerons.Clear();
    2339             :   weak_objects_.next_ephemerons.Clear();
    2340             :   weak_objects_.discovered_ephemerons.Clear();
    2341             :   weak_objects_.weak_references.Clear();
    2342             :   weak_objects_.weak_objects_in_code.Clear();
    2343             :   weak_objects_.js_weak_refs.Clear();
    2344             :   weak_objects_.weak_cells.Clear();
    2345             :   weak_objects_.bytecode_flushing_candidates.Clear();
    2346             :   weak_objects_.flushed_js_functions.Clear();
    2347       61519 : }
    2348             : 
    2349           0 : bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
    2350           0 :   return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
    2351             : }
    2352             : 
    2353             : MarkCompactCollector::RecordRelocSlotInfo
    2354     7555576 : MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
    2355             :                                              HeapObject target) {
    2356             :   RecordRelocSlotInfo result;
    2357     7555576 :   result.should_record = false;
    2358             :   Page* target_page = Page::FromHeapObject(target);
    2359             :   Page* source_page = Page::FromHeapObject(host);
    2360    15111152 :   if (target_page->IsEvacuationCandidate() &&
    2361      299471 :       (rinfo->host().is_null() ||
    2362             :        !source_page->ShouldSkipEvacuationSlotRecording())) {
    2363             :     RelocInfo::Mode rmode = rinfo->rmode();
    2364             :     Address addr = rinfo->pc();
    2365             :     SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
    2366      277576 :     if (rinfo->IsInConstantPool()) {
    2367             :       addr = rinfo->constant_pool_entry_address();
    2368             :       if (RelocInfo::IsCodeTargetMode(rmode)) {
    2369             :         slot_type = CODE_ENTRY_SLOT;
    2370             :       } else {
    2371             :         DCHECK(RelocInfo::IsEmbeddedObject(rmode));
    2372             :         slot_type = OBJECT_SLOT;
    2373             :       }
    2374             :     }
    2375      555110 :     uintptr_t offset = addr - source_page->address();
    2376             :     DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
    2377      277555 :     result.should_record = true;
    2378      277555 :     result.memory_chunk = source_page;
    2379      277555 :     result.slot_type = slot_type;
    2380      277555 :     result.offset = static_cast<uint32_t>(offset);
    2381             :   }
    2382     7555555 :   return result;
    2383             : }
    2384             : 
    2385     2541885 : void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
    2386             :                                            HeapObject target) {
    2387     2541885 :   RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
    2388     2541885 :   if (info.should_record) {
    2389       85364 :     RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
    2390       85364 :                                            info.offset);
    2391             :   }
    2392     2541882 : }
    2393             : 
    2394             : namespace {
    2395             : 
    2396             : // The missing specialization MakeSlotValue<FullObjectSlot, WEAK>() turns an
    2397             : // attempt to store a weak reference in a strong-only slot into a compilation error.
    2398             : template <typename TSlot, HeapObjectReferenceType reference_type>
    2399             : typename TSlot::TObject MakeSlotValue(HeapObject heap_object);
    2400             : 
    2401             : template <>
    2402             : Object MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
    2403             :     HeapObject heap_object) {
    2404             :   return heap_object;
    2405             : }
    2406             : 
    2407             : template <>
    2408             : MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
    2409             :     HeapObject heap_object) {
    2410             :   return HeapObjectReference::Strong(heap_object);
    2411             : }
    2412             : 
    2413             : template <>
    2414             : MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
    2415             :     HeapObject heap_object) {
    2416             :   return HeapObjectReference::Weak(heap_object);
    2417             : }
    2418             : 
    2419             : #ifdef V8_COMPRESS_POINTERS
    2420             : template <>
    2421             : Object MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
    2422             :     HeapObject heap_object) {
    2423             :   return heap_object;
    2424             : }
    2425             : 
    2426             : template <>
    2427             : MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
    2428             :     HeapObject heap_object) {
    2429             :   return HeapObjectReference::Strong(heap_object);
    2430             : }
    2431             : 
    2432             : // The following specialization
    2433             : //   MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
    2434             : // is not used.
    2435             : #endif
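
// A minimal, self-contained sketch (illustrative names, not V8's) of the
// technique used by MakeSlotValue: the primary template is declared but never
// defined, so instantiating an unsupported combination fails at build time
// (at link time here; declaring the primary "= delete" would move the failure
// to compile time), while each supported combination gets a specialization.
enum class RefKind { kStrong, kWeak };

template <typename T, RefKind kind>
T MakeValue(void* object);  // intentionally declared without a definition

template <>
inline int* MakeValue<int*, RefKind::kStrong>(void* object) {
  return static_cast<int*>(object);
}
// MakeValue<int*, RefKind::kWeak>(p) has no specialization and fails to link.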
    2436             : 
    2437             : template <AccessMode access_mode, HeapObjectReferenceType reference_type,
    2438             :           typename TSlot>
    2439   424594768 : static inline SlotCallbackResult UpdateSlot(TSlot slot,
    2440             :                                             typename TSlot::TObject old,
    2441             :                                             HeapObject heap_obj) {
    2442             :   static_assert(
    2443             :       std::is_same<TSlot, FullObjectSlot>::value ||
    2444             :           std::is_same<TSlot, ObjectSlot>::value ||
    2445             :           std::is_same<TSlot, FullMaybeObjectSlot>::value ||
    2446             :           std::is_same<TSlot, MaybeObjectSlot>::value,
    2447             :       "Only [Full]ObjectSlot and [Full]MaybeObjectSlot are expected here");
    2448             :   MapWord map_word = heap_obj->map_word();
    2449   424594768 :   if (map_word.IsForwardingAddress()) {
    2450             :     DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
    2451             :                    MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
    2452             :                        Page::FromHeapObject(heap_obj)->IsFlagSet(
    2453             :                            Page::COMPACTION_WAS_ABORTED));
    2454             :     typename TSlot::TObject target =
    2455             :         MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
    2456             :     if (access_mode == AccessMode::NON_ATOMIC) {
    2457             :       slot.store(target);
    2458             :     } else {
    2459             :       slot.Release_CompareAndSwap(old, target);
    2460             :     }
    2461             :     DCHECK(!Heap::InFromPage(target));
    2462             :     DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
    2463             :   } else {
    2464             :     DCHECK(heap_obj->map()->IsMap());
    2465             :   }
    2466             :   // OLD_TO_OLD slots are always removed after updating.
    2467   424594768 :   return REMOVE_SLOT;
    2468             : }
    2469             : 
    2470             : template <AccessMode access_mode, typename TSlot>
    2471    92458351 : static inline SlotCallbackResult UpdateSlot(TSlot slot) {
    2472    92458351 :   typename TSlot::TObject obj = slot.Relaxed_Load();
    2473    92801319 :   HeapObject heap_obj;
    2474    92801319 :   if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
    2475     7426984 :     UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(slot, obj, heap_obj);
    2476    85374335 :   } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
    2477             :     return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
    2478    51741512 :                                                                     heap_obj);
    2479             :   }
    2480             :   return REMOVE_SLOT;
    2481             : }
    2482             : 
    2483             : template <AccessMode access_mode, typename TSlot>
    2484   399741923 : static inline SlotCallbackResult UpdateStrongSlot(TSlot slot) {
    2485             :   DCHECK(!HasWeakHeapObjectTag((*slot).ptr()));
    2486   134837069 :   typename TSlot::TObject obj = slot.Relaxed_Load();
    2487   399779106 :   HeapObject heap_obj;
    2488   399779106 :   if (obj.GetHeapObject(&heap_obj)) {
    2489             :     return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
    2490   365413002 :                                                                     heap_obj);
    2491             :   }
    2492             :   return REMOVE_SLOT;
    2493             : }
    2494             : 
    2495             : }  // namespace
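
// A minimal sketch (std::atomic instead of V8's slot types) of the
// access-mode choice inside UpdateSlot: a single-threaded updater may store
// directly, while concurrent updaters must use compare-and-swap so that a
// slot already rewritten by another thread is not clobbered; every thread
// computes the same forwarding address, so a failed CAS is simply ignored.
#include <atomic>

enum class UpdateMode { kAtomic, kNonAtomic };

template <UpdateMode mode>
void StoreForwarded(std::atomic<void*>* slot, void* old_value,
                    void* new_value) {
  if (mode == UpdateMode::kNonAtomic) {
    slot->store(new_value, std::memory_order_relaxed);
  } else {
    // On failure another thread won; old_value is overwritten but unused.
    slot->compare_exchange_strong(old_value, new_value,
                                  std::memory_order_release);
  }
}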
    2496             : 
    2497             : // Visitor for updating root pointers and to-space pointers.
    2498             : // It does not expect to encounter pointers to dead objects.
    2499      300354 : class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
    2500             :  public:
    2501      159499 :   PointersUpdatingVisitor() {}
    2502             : 
    2503      300011 :   void VisitPointer(HeapObject host, ObjectSlot p) override {
    2504             :     UpdateStrongSlotInternal(p);
    2505      300017 :   }
    2506             : 
    2507          12 :   void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
    2508             :     UpdateSlotInternal(p);
    2509          12 :   }
    2510             : 
    2511    12462077 :   void VisitPointers(HeapObject host, ObjectSlot start,
    2512             :                      ObjectSlot end) override {
    2513   159476040 :     for (ObjectSlot p = start; p < end; ++p) {
    2514             :       UpdateStrongSlotInternal(p);
    2515             :     }
    2516    12358290 :   }
    2517             : 
    2518           0 :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
    2519             :                      MaybeObjectSlot end) final {
    2520    67540378 :     for (MaybeObjectSlot p = start; p < end; ++p) {
    2521             :       UpdateSlotInternal(p);
    2522             :     }
    2523           0 :   }
    2524             : 
    2525   228774770 :   void VisitRootPointer(Root root, const char* description,
    2526             :                         FullObjectSlot p) override {
    2527             :     UpdateRootSlotInternal(p);
    2528   228774782 :   }
    2529             : 
    2530     1565092 :   void VisitRootPointers(Root root, const char* description,
    2531             :                          FullObjectSlot start, FullObjectSlot end) override {
    2532    38982800 :     for (FullObjectSlot p = start; p < end; ++p) {
    2533             :       UpdateRootSlotInternal(p);
    2534             :     }
    2535     1565092 :   }
    2536             : 
    2537           0 :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    2538             :     // This visitor never visits code objects.
    2539           0 :     UNREACHABLE();
    2540             :   }
    2541             : 
    2542           0 :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    2543             :     // This visitor never visits code objects.
    2544           0 :     UNREACHABLE();
    2545             :   }
    2546             : 
    2547             :  private:
    2548             :   static inline SlotCallbackResult UpdateRootSlotInternal(FullObjectSlot slot) {
    2549   264627386 :     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
    2550             :   }
    2551             : 
    2552             :   static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
    2553             :       MaybeObjectSlot slot) {
    2554             :     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
    2555             :   }
    2556             : 
    2557             :   static inline SlotCallbackResult UpdateStrongSlotInternal(ObjectSlot slot) {
    2558   134955684 :     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
    2559             :   }
    2560             : 
    2561             :   static inline SlotCallbackResult UpdateSlotInternal(MaybeObjectSlot slot) {
    2562    64227968 :     return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
    2563             :   }
    2564             : };
    2565             : 
    2566      101378 : static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
    2567             :                                                         FullObjectSlot p) {
    2568             :   MapWord map_word = HeapObject::cast(*p)->map_word();
    2569             : 
    2570      101378 :   if (map_word.IsForwardingAddress()) {
    2571             :     String new_string = String::cast(map_word.ToForwardingAddress());
    2572             : 
    2573         396 :     if (new_string->IsExternalString()) {
    2574         396 :       MemoryChunk::MoveExternalBackingStoreBytes(
    2575             :           ExternalBackingStoreType::kExternalString,
    2576             :           Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
    2577         792 :           ExternalString::cast(new_string)->ExternalPayloadSize());
    2578             :     }
    2579         396 :     return new_string;
    2580             :   }
    2581             : 
    2582             :   return String::cast(*p);
    2583             : }
    2584             : 
    2585       73955 : void MarkCompactCollector::EvacuatePrologue() {
    2586             :   // New space.
    2587             :   NewSpace* new_space = heap()->new_space();
    2588             :   // Append the list of new space pages to be processed.
    2589      163451 :   for (Page* p :
    2590       89496 :        PageRange(new_space->first_allocatable_address(), new_space->top())) {
    2591       89496 :     new_space_evacuation_pages_.push_back(p);
    2592             :   }
    2593       73955 :   new_space->Flip();
    2594       73955 :   new_space->ResetLinearAllocationArea();
    2595             : 
    2596       73955 :   heap()->new_lo_space()->Flip();
    2597             :   heap()->new_lo_space()->ResetPendingObject();
    2598             : 
    2599             :   // Old space.
    2600             :   DCHECK(old_space_evacuation_pages_.empty());
    2601       73955 :   old_space_evacuation_pages_ = std::move(evacuation_candidates_);
    2602             :   evacuation_candidates_.clear();
    2603             :   DCHECK(evacuation_candidates_.empty());
    2604       73955 : }
    2605             : 
    2606       73955 : void MarkCompactCollector::EvacuateEpilogue() {
    2607             :   aborted_evacuation_candidates_.clear();
    2608             :   // New space.
    2609             :   heap()->new_space()->set_age_mark(heap()->new_space()->top());
    2610             :   // Deallocate unmarked large objects.
    2611       73955 :   heap()->lo_space()->FreeUnmarkedObjects();
    2612       73955 :   heap()->code_lo_space()->FreeUnmarkedObjects();
    2613       73955 :   heap()->new_lo_space()->FreeUnmarkedObjects();
    2614             :   // Old space. Deallocate evacuated candidate pages.
    2615       73955 :   ReleaseEvacuationCandidates();
    2616             :   // Give pages that are queued to be freed back to the OS.
    2617       73955 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2618             : #ifdef DEBUG
    2619             :   // Old-to-old slot sets must be empty after evacuation.
    2620             :   for (Page* p : *heap()->old_space()) {
    2621             :     DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
    2622             :     DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
    2623             :     DCHECK_NULL(p->invalidated_slots());
    2624             :   }
    2625             : #endif
    2626       73955 : }
    2627             : 
    2628             : class Evacuator : public Malloced {
    2629             :  public:
    2630             :   enum EvacuationMode {
    2631             :     kObjectsNewToOld,
    2632             :     kPageNewToOld,
    2633             :     kObjectsOldToOld,
    2634             :     kPageNewToNew,
    2635             :   };
    2636             : 
    2637             :   static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
    2638             :     // Note: The order of checks is important in this function.
    2639      176235 :     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
    2640             :       return kPageNewToOld;
    2641      173338 :     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
    2642             :       return kPageNewToNew;
    2643      170252 :     if (chunk->InYoungGeneration()) return kObjectsNewToOld;
    2644             :     return kObjectsOldToOld;
    2645             :   }
    2646             : 
    2647             :   // NewSpacePages with more live bytes than this threshold qualify for fast
    2648             :   // evacuation.
    2649       60144 :   static intptr_t NewSpacePageEvacuationThreshold() {
    2650       60144 :     if (FLAG_page_promotion)
    2651      120248 :       return FLAG_page_promotion_threshold *
    2652      120248 :              MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
    2653          20 :     return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
    2654             :   }
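
  // Worked example (illustrative numbers; the flag default and the page
  // layout depend on the build): with FLAG_page_promotion_threshold == 70 and
  // roughly 500 KB of allocatable memory per data page, a new-space page
  // qualifies for fast evacuation once it holds more than
  // 70 * 500 KB / 100 = 350 KB of live bytes.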
    2655             : 
    2656       77114 :   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
    2657             :       : heap_(heap),
    2658             :         local_allocator_(heap_),
    2659             :         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
    2660             :         new_space_visitor_(heap_, &local_allocator_, record_visitor,
    2661             :                            &local_pretenuring_feedback_),
    2662             :         new_to_new_page_visitor_(heap_, record_visitor,
    2663             :                                  &local_pretenuring_feedback_),
    2664             :         new_to_old_page_visitor_(heap_, record_visitor,
    2665             :                                  &local_pretenuring_feedback_),
    2666             : 
    2667             :         old_space_visitor_(heap_, &local_allocator_, record_visitor),
    2668             :         duration_(0.0),
    2669      231342 :         bytes_compacted_(0) {}
    2670             : 
    2671      231342 :   virtual ~Evacuator() = default;
    2672             : 
    2673             :   void EvacuatePage(MemoryChunk* chunk);
    2674             : 
    2675         802 :   void AddObserver(MigrationObserver* observer) {
    2676             :     new_space_visitor_.AddObserver(observer);
    2677             :     old_space_visitor_.AddObserver(observer);
    2678         802 :   }
    2679             : 
    2680             :   // Merge back locally cached info sequentially. Note that this method needs
    2681             :   // to be called from the main thread.
    2682             :   inline void Finalize();
    2683             : 
    2684             :   virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
    2685             : 
    2686             :  protected:
    2687             :   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
    2688             : 
    2689             :   // |saved_live_bytes| receives the live bytes of the page that was processed.
    2690             :   virtual void RawEvacuatePage(MemoryChunk* chunk,
    2691             :                                intptr_t* saved_live_bytes) = 0;
    2692             : 
    2693             :   inline Heap* heap() { return heap_; }
    2694             : 
    2695             :   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
    2696       79769 :     duration_ += duration;
    2697       79769 :     bytes_compacted_ += bytes_compacted;
    2698             :   }
    2699             : 
    2700             :   Heap* heap_;
    2701             : 
    2702             :   // Locally cached collector data.
    2703             :   LocalAllocator local_allocator_;
    2704             :   Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
    2705             : 
    2706             :   // Visitors for the corresponding spaces.
    2707             :   EvacuateNewSpaceVisitor new_space_visitor_;
    2708             :   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
    2709             :       new_to_new_page_visitor_;
    2710             :   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
    2711             :       new_to_old_page_visitor_;
    2712             :   EvacuateOldSpaceVisitor old_space_visitor_;
    2713             : 
    2714             :   // Bookkeeping info.
    2715             :   double duration_;
    2716             :   intptr_t bytes_compacted_;
    2717             : };
    2718             : 
    2719       79716 : void Evacuator::EvacuatePage(MemoryChunk* chunk) {
    2720      239201 :   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
    2721             :   DCHECK(chunk->SweepingDone());
    2722       79716 :   intptr_t saved_live_bytes = 0;
    2723       79716 :   double evacuation_time = 0.0;
    2724             :   {
    2725             :     AlwaysAllocateScope always_allocate(heap()->isolate());
    2726             :     TimedScope timed_scope(&evacuation_time);
    2727       79678 :     RawEvacuatePage(chunk, &saved_live_bytes);
    2728             :   }
    2729       79769 :   ReportCompactionProgress(evacuation_time, saved_live_bytes);
    2730       79769 :   if (FLAG_trace_evacuation) {
    2731           0 :     PrintIsolate(heap()->isolate(),
    2732             :                  "evacuation[%p]: page=%p new_space=%d "
    2733             :                  "page_evacuation=%d executable=%d contains_age_mark=%d "
    2734             :                  "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
    2735             :                  static_cast<void*>(this), static_cast<void*>(chunk),
    2736             :                  chunk->InNewSpace(),
    2737           0 :                  chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
    2738             :                      chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
    2739             :                  chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
    2740             :                  chunk->Contains(heap()->new_space()->age_mark()),
    2741             :                  saved_live_bytes, evacuation_time,
    2742           0 :                  chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
    2743             :   }
    2744       79765 : }
    2745             : 
    2746       77114 : void Evacuator::Finalize() {
    2747       77114 :   local_allocator_.Finalize();
    2748      154228 :   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
    2749       77114 :   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
    2750             :                                        new_to_old_page_visitor_.moved_bytes());
    2751       77114 :   heap()->IncrementSemiSpaceCopiedObjectSize(
    2752       77114 :       new_space_visitor_.semispace_copied_size() +
    2753             :       new_to_new_page_visitor_.moved_bytes());
    2754       77114 :   heap()->IncrementYoungSurvivorsCounter(
    2755       77114 :       new_space_visitor_.promoted_size() +
    2756       77114 :       new_space_visitor_.semispace_copied_size() +
    2757       77114 :       new_to_old_page_visitor_.moved_bytes() +
    2758             :       new_to_new_page_visitor_.moved_bytes());
    2759       77114 :   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
    2760       77114 : }
    2761             : 
    2762      154228 : class FullEvacuator : public Evacuator {
    2763             :  public:
    2764             :   FullEvacuator(MarkCompactCollector* collector,
    2765             :                 RecordMigratedSlotVisitor* record_visitor)
    2766       77114 :       : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
    2767             : 
    2768       76008 :   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
    2769       76008 :     return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
    2770             :   }
    2771             : 
    2772             :  protected:
    2773             :   void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
    2774             : 
    2775             :   MarkCompactCollector* collector_;
    2776             : };
    2777             : 
    2778       79733 : void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
    2779             :   const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
    2780      239254 :   TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    2781             :                "FullEvacuator::RawEvacuatePage", "evacuation_mode",
    2782             :                evacuation_mode);
    2783             :   MarkCompactCollector::NonAtomicMarkingState* marking_state =
    2784       79737 :       collector_->non_atomic_marking_state();
    2785       79737 :   *live_bytes = marking_state->live_bytes(chunk);
    2786       79737 :   HeapObject failed_object;
    2787       79737 :   switch (evacuation_mode) {
    2788             :     case kObjectsNewToOld:
    2789       69226 :       LiveObjectVisitor::VisitBlackObjectsNoFail(
    2790             :           chunk, marking_state, &new_space_visitor_,
    2791       69226 :           LiveObjectVisitor::kClearMarkbits);
    2792             :       // ArrayBufferTracker will be updated during pointers updating.
    2793       69250 :       break;
    2794             :     case kPageNewToOld:
    2795        1964 :       LiveObjectVisitor::VisitBlackObjectsNoFail(
    2796             :           chunk, marking_state, &new_to_old_page_visitor_,
    2797        1964 :           LiveObjectVisitor::kKeepMarking);
    2798             :       new_to_old_page_visitor_.account_moved_bytes(
    2799             :           marking_state->live_bytes(chunk));
    2800             :       // ArrayBufferTracker will be updated during sweeping.
    2801             :       break;
    2802             :     case kPageNewToNew:
    2803        1544 :       LiveObjectVisitor::VisitBlackObjectsNoFail(
    2804             :           chunk, marking_state, &new_to_new_page_visitor_,
    2805        1544 :           LiveObjectVisitor::kKeepMarking);
    2806             :       new_to_new_page_visitor_.account_moved_bytes(
    2807             :           marking_state->live_bytes(chunk));
    2808             :       // ArrayBufferTracker will be updated during sweeping.
    2809             :       break;
    2810             :     case kObjectsOldToOld: {
    2811        6985 :       const bool success = LiveObjectVisitor::VisitBlackObjects(
    2812             :           chunk, marking_state, &old_space_visitor_,
    2813        6985 :           LiveObjectVisitor::kClearMarkbits, &failed_object);
    2814        7006 :       if (!success) {
    2815             :         // Aborted compaction page. Actual processing happens on the main
    2816             :         // thread for simplicity.
    2817          25 :         collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
    2818             :       } else {
    2819             :         // ArrayBufferTracker will be updated during pointers updating.
    2820             :       }
    2821             :       break;
    2822             :     }
    2823             :   }
    2824       79762 : }
    2825             : 
    2826             : class EvacuationItem : public ItemParallelJob::Item {
    2827             :  public:
    2828       79770 :   explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
    2829      159540 :   ~EvacuationItem() override = default;
    2830             :   MemoryChunk* chunk() const { return chunk_; }
    2831             : 
    2832             :  private:
    2833             :   MemoryChunk* chunk_;
    2834             : };
    2835             : 
    2836      154209 : class PageEvacuationTask : public ItemParallelJob::Task {
    2837             :  public:
    2838             :   PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
    2839             :       : ItemParallelJob::Task(isolate),
    2840             :         evacuator_(evacuator),
    2841      154228 :         tracer_(isolate->heap()->tracer()) {}
    2842             : 
    2843       76029 :   void RunInParallel() override {
    2844      304117 :     TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
    2845             :     EvacuationItem* item = nullptr;
    2846      235542 :     while ((item = GetItem<EvacuationItem>()) != nullptr) {
    2847       79748 :       evacuator_->EvacuatePage(item->chunk());
    2848       79765 :       item->MarkFinished();
    2849             :     }
    2850       76072 :   }
    2851             : 
    2852             :  private:
    2853             :   Evacuator* evacuator_;
    2854             :   GCTracer* tracer_;
    2855             : };
    2856             : 
    2857             : template <class Evacuator, class Collector>
    2858       61254 : void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
    2859             :     Collector* collector, ItemParallelJob* job,
    2860             :     RecordMigratedSlotVisitor* record_visitor,
    2861             :     MigrationObserver* migration_observer, const intptr_t live_bytes) {
    2862             :   // Used for trace summary.
    2863             :   double compaction_speed = 0;
    2864       61254 :   if (FLAG_trace_evacuation) {
    2865           0 :     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    2866             :   }
    2867             : 
    2868       61254 :   const bool profiling = isolate()->LogObjectRelocation();
    2869             :   ProfilingMigrationObserver profiling_observer(heap());
    2870             : 
    2871             :   const int wanted_num_tasks =
    2872       61254 :       NumberOfParallelCompactionTasks(job->NumberOfItems());
    2873       61254 :   Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
    2874      215482 :   for (int i = 0; i < wanted_num_tasks; i++) {
    2875       77114 :     evacuators[i] = new Evacuator(collector, record_visitor);
    2876       77114 :     if (profiling) evacuators[i]->AddObserver(&profiling_observer);
    2877       77114 :     if (migration_observer != nullptr)
    2878           0 :       evacuators[i]->AddObserver(migration_observer);
    2879      154228 :     job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
    2880             :   }
    2881       61254 :   job->Run();
    2882      215482 :   for (int i = 0; i < wanted_num_tasks; i++) {
    2883       77114 :     evacuators[i]->Finalize();
    2884       77114 :     delete evacuators[i];
    2885             :   }
    2886       61254 :   delete[] evacuators;
    2887             : 
    2888       61254 :   if (FLAG_trace_evacuation) {
    2889           0 :     PrintIsolate(isolate(),
    2890             :                  "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
    2891             :                  "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
    2892             :                  " compaction_speed=%.f\n",
    2893             :                  isolate()->time_millis_since_init(),
    2894             :                  FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
    2895             :                  wanted_num_tasks, job->NumberOfTasks(),
    2896           0 :                  V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
    2897             :                  live_bytes, compaction_speed);
    2898             :   }
    2899       61254 : }
    2900             : 
    2901       71734 : bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
    2902             :   const bool reduce_memory = heap()->ShouldReduceMemory();
    2903             :   const Address age_mark = heap()->new_space()->age_mark();
    2904      120288 :   return !reduce_memory && !p->NeverEvacuate() &&
    2905       63276 :          (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
    2906       74230 :          !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
    2907             : }
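                     : // [Editorial note] ShouldMovePage gates whole-page promotion on several
                     : // heuristics: memory-reducing GCs prefer copying, NeverEvacuate pages stay
                     : // put, only pages whose live bytes exceed the evacuation threshold are
                     : // worth moving, pages containing the age mark hold objects too young to
                     : // promote, and old generation must have room. A condensed restatement with
                     : // hypothetical plain types (not V8's Page/Heap API):
                     : //
                     : //   struct PageInfo {
                     : //     bool never_evacuate;
                     : //     bool contains_age_mark;
                     : //     intptr_t live_bytes;
                     : //   };
                     : //   bool ShouldMoveWholePage(const PageInfo& p, bool reduce_memory,
                     : //                            intptr_t threshold, bool old_gen_has_room) {
                     : //     return !reduce_memory && !p.never_evacuate &&
                     : //            p.live_bytes > threshold && !p.contains_age_mark &&
                     : //            old_gen_has_room;
                     : //   }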
    2908             : 
    2909       73955 : void MarkCompactCollector::EvacuatePagesInParallel() {
    2910             :   ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
    2911      209164 :                                  &page_parallel_job_semaphore_);
    2912             :   intptr_t live_bytes = 0;
    2913             : 
    2914       80961 :   for (Page* page : old_space_evacuation_pages_) {
    2915        7006 :     live_bytes += non_atomic_marking_state()->live_bytes(page);
    2916        7006 :     evacuation_job.AddItem(new EvacuationItem(page));
    2917             :   }
    2918             : 
    2919      163451 :   for (Page* page : new_space_evacuation_pages_) {
    2920             :     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
    2921       89496 :     if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
    2922       71734 :     live_bytes += live_bytes_on_page;
    2923       71734 :     if (ShouldMovePage(page, live_bytes_on_page)) {
    2924        2481 :       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
    2925         937 :         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
    2926             :         DCHECK_EQ(heap()->old_space(), page->owner());
    2927             :         // The move added page->allocated_bytes to the old space, but we are
    2928             :         // going to sweep the page and add back only page->live_byte_count.
    2929             :         heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
    2930             :                                                     page);
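                     :         // [Editorial example, hypothetical numbers] If the moved page
                     :         // reported allocated_bytes == 256 KB but only 100 KB of it is
                     :         // live, the move credited old space with 256 KB; sweeping will
                     :         // later credit the live 100 KB, so the 256 KB is subtracted
                     :         // here to avoid double counting.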
    2931             :       } else {
    2932        1544 :         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
    2933             :       }
    2934             :     }
    2935       71734 :     evacuation_job.AddItem(new EvacuationItem(page));
    2936             :   }
    2937             : 
    2938             :   // Promote young generation large objects.
    2939             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2940             :       heap()->incremental_marking()->non_atomic_marking_state();
    2941             : 
    2942       75619 :   for (auto it = heap()->new_lo_space()->begin();
    2943             :        it != heap()->new_lo_space()->end();) {
    2944             :     LargePage* current = *it;
    2945             :     it++;
    2946             :     HeapObject object = current->GetObject();
    2947             :     DCHECK(!marking_state->IsGrey(object));
    2948        1664 :     if (marking_state->IsBlack(object)) {
    2949        1030 :       heap_->lo_space()->PromoteNewLargeObject(current);
    2950             :       current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
    2951        1030 :       evacuation_job.AddItem(new EvacuationItem(current));
    2952             :     }
    2953             :   }
    2954             : 
    2955       86656 :   if (evacuation_job.NumberOfItems() == 0) return;
    2956             : 
    2957             :   RecordMigratedSlotVisitor record_visitor(this);
    2958       61254 :   CreateAndExecuteEvacuationTasks<FullEvacuator>(
    2959       61254 :       this, &evacuation_job, &record_visitor, nullptr, live_bytes);
    2960       61254 :   PostProcessEvacuationCandidates();
    2961             : }
    2962             : 
    2963      147910 : class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
    2964             :  public:
    2965      147910 :   Object RetainAs(Object object) override {
    2966      147910 :     if (object->IsHeapObject()) {
    2967             :       HeapObject heap_object = HeapObject::cast(object);
    2968             :       MapWord map_word = heap_object->map_word();
    2969      147910 :       if (map_word.IsForwardingAddress()) {
    2970        1435 :         return map_word.ToForwardingAddress();
    2971             :       }
    2972             :     }
    2973      146475 :     return object;
    2974             :   }
    2975             : };
    2976             : 
    2977           0 : void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
    2978             :   EvacuateRecordOnlyVisitor visitor(heap());
    2979             :   LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
    2980             :                                              &visitor,
    2981           0 :                                              LiveObjectVisitor::kKeepMarking);
    2982           0 : }
    2983             : 
    2984             : template <class Visitor, typename MarkingState>
    2985        6975 : bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
    2986             :                                           MarkingState* marking_state,
    2987             :                                           Visitor* visitor,
    2988             :                                           IterationMode iteration_mode,
    2989             :                                           HeapObject* failed_object) {
    2990       20956 :   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    2991             :                "LiveObjectVisitor::VisitBlackObjects");
    2992    20513522 :   for (auto object_and_size :
    2993             :        LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
    2994             :     HeapObject const object = object_and_size.first;
    2995    20506541 :     if (!visitor->Visit(object, object_and_size.second)) {
    2996          25 :       if (iteration_mode == kClearMarkbits) {
    2997          25 :         marking_state->bitmap(chunk)->ClearRange(
    2998             :             chunk->AddressToMarkbitIndex(chunk->area_start()),
    2999             :             chunk->AddressToMarkbitIndex(object->address()));
    3000          25 :         *failed_object = object;
    3001             :       }
    3002             :       return false;
    3003             :     }
    3004             :   }
    3005        6981 :   if (iteration_mode == kClearMarkbits) {
    3006             :     marking_state->ClearLiveness(chunk);
    3007             :   }
    3008             :   return true;
    3009             : }
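                     : // [Editorial note] The failure path above supports aborted compaction: if
                     : // Visit() fails mid-page (e.g. the evacuator ran out of target memory),
                     : // the markbits of the already-evacuated prefix [area_start, failed_object)
                     : // are cleared and the failed object is reported, so that
                     : // PostProcessEvacuationCandidates() can later re-record slots for the
                     : // surviving suffix of the page.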
    3010             : 
    3011             : template <class Visitor, typename MarkingState>
    3012       72732 : void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
    3013             :                                                 MarkingState* marking_state,
    3014             :                                                 Visitor* visitor,
    3015             :                                                 IterationMode iteration_mode) {
    3016      218257 :   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    3017             :                "LiveObjectVisitor::VisitBlackObjectsNoFail");
    3018       72738 :   if (chunk->IsLargePage()) {
    3019        1028 :     HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
    3020        1028 :     if (marking_state->IsBlack(object)) {
    3021        1029 :       const bool success = visitor->Visit(object, object->Size());
    3022             :       USE(success);
    3023             :       DCHECK(success);
    3024             :     }
    3025             :   } else {
    3026    41858230 :     for (auto object_and_size :
    3027             :          LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
    3028    35978377 :       HeapObject const object = object_and_size.first;
    3029             :       DCHECK(marking_state->IsBlack(object));
    3030    35978377 :       const bool success = visitor->Visit(object, object_and_size.second);
    3031             :       USE(success);
    3032             :       DCHECK(success);
    3033             :     }
    3034             :   }
    3035       72787 :   if (iteration_mode == kClearMarkbits) {
    3036             :     marking_state->ClearLiveness(chunk);
    3037             :   }
    3038       72785 : }
    3039             : 
    3040             : template <class Visitor, typename MarkingState>
    3041           0 : void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
    3042             :                                                MarkingState* marking_state,
    3043             :                                                Visitor* visitor,
    3044             :                                                IterationMode iteration_mode) {
    3045           0 :   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    3046             :                "LiveObjectVisitor::VisitGreyObjectsNoFail");
    3047           0 :   if (chunk->IsLargePage()) {
    3048           0 :     HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
    3049           0 :     if (marking_state->IsGrey(object)) {
    3050           0 :       const bool success = visitor->Visit(object, object->Size());
    3051             :       USE(success);
    3052             :       DCHECK(success);
    3053             :     }
    3054             :   } else {
    3055           0 :     for (auto object_and_size :
    3056             :          LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
    3057           0 :       HeapObject const object = object_and_size.first;
    3058             :       DCHECK(marking_state->IsGrey(object));
    3059           0 :       const bool success = visitor->Visit(object, object_and_size.second);
    3060             :       USE(success);
    3061             :       DCHECK(success);
    3062             :     }
    3063             :   }
    3064           0 :   if (iteration_mode == kClearMarkbits) {
    3065           0 :     marking_state->ClearLiveness(chunk);
    3066             :   }
    3067           0 : }
    3068             : 
    3069             : template <typename MarkingState>
    3070          25 : void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
    3071             :                                            MarkingState* marking_state) {
    3072             :   int new_live_size = 0;
    3073          90 :   for (auto object_and_size :
    3074             :        LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
    3075          65 :     new_live_size += object_and_size.second;
    3076             :   }
    3077          25 :   marking_state->SetLiveBytes(chunk, new_live_size);
    3078          25 : }
    3079             : 
    3080       73955 : void MarkCompactCollector::Evacuate() {
    3081      295820 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
    3082             :   base::MutexGuard guard(heap()->relocation_mutex());
    3083             : 
    3084             :   {
    3085      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
    3086       73955 :     EvacuatePrologue();
    3087             :   }
    3088             : 
    3089             :   {
    3090      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
    3091             :     EvacuationScope evacuation_scope(this);
    3092       73955 :     EvacuatePagesInParallel();
    3093             :   }
    3094             : 
    3095       73955 :   UpdatePointersAfterEvacuation();
    3096             : 
    3097             :   {
    3098      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
    3099       73955 :     if (!heap()->new_space()->Rebalance()) {
    3100           0 :       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
    3101             :     }
    3102             :   }
    3103             : 
    3104             :   // Give pages that are queued to be freed back to the OS. Note that slot
    3105             :   // filtering only handles old space (for unboxed doubles), so map space can
    3106             :   // still contain stale pointers. We free the chunks only after the pointer
    3107             :   // updates, so page headers remain accessible until then.
    3108       73955 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    3109             : 
    3110             :   {
    3111      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
    3112             : 
    3113      163451 :     for (Page* p : new_space_evacuation_pages_) {
    3114       89496 :       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
    3115             :         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
    3116        1544 :         sweeper()->AddPageForIterability(p);
    3117       87952 :       } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
    3118             :         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
    3119             :         DCHECK_EQ(OLD_SPACE, p->owner()->identity());
    3120             :       // Important: the skip list must be cleared only after roots have been
    3121             :       // updated, because root iteration traverses the stack and might have
    3122             :       // to find code objects from a non-updated pc pointing into an
                     :       // evacuation candidate.
    3123             :     new_space_evacuation_pages_.clear();
    3124             : 
    3125       80961 :     for (Page* p : old_space_evacuation_pages_) {
    3126             :       // Important: skip list should be cleared only after roots were updated
    3127             :       // because root iteration traverses the stack and might have to find
    3128             :       // code objects from non-updated pc pointing into evacuation candidate.
    3129             :       SkipList* list = p->skip_list();
    3130        7006 :       if (list != nullptr) list->Clear();
    3131        7006 :       if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
    3132          25 :         sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
    3133             :         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
    3134             :       }
    3135             :     }
    3136             :   }
    3137             : 
    3138             :   {
    3139      295820 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
    3140       73955 :     EvacuateEpilogue();
    3141             :   }
    3142             : 
    3143             : #ifdef VERIFY_HEAP
    3144             :   if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
    3145             :     FullEvacuationVerifier verifier(heap());
    3146             :     verifier.Run();
    3147             :   }
    3148             : #endif
    3149       73955 : }
    3150             : 
    3151      477225 : class UpdatingItem : public ItemParallelJob::Item {
    3152             :  public:
    3153      954450 :   ~UpdatingItem() override = default;
    3154             :   virtual void Process() = 0;
    3155             : };
    3156             : 
    3157      639765 : class PointersUpdatingTask : public ItemParallelJob::Task {
    3158             :  public:
    3159             :   explicit PointersUpdatingTask(Isolate* isolate,
    3160             :                                 GCTracer::BackgroundScope::ScopeId scope)
    3161             :       : ItemParallelJob::Task(isolate),
    3162             :         tracer_(isolate->heap()->tracer()),
    3163      640872 :         scope_(scope) {}
    3164             : 
    3165      263784 :   void RunInParallel() override {
    3166     1054483 :     TRACE_BACKGROUND_GC(tracer_, scope_);
    3167             :     UpdatingItem* item = nullptr;
    3168     1217408 :     while ((item = GetItem<UpdatingItem>()) != nullptr) {
    3169      477044 :       item->Process();
    3170      476583 :       item->MarkFinished();
    3171             :     }
    3172      264064 :   }
    3173             : 
    3174             :  private:
    3175             :   GCTracer* tracer_;
    3176             :   GCTracer::BackgroundScope::ScopeId scope_;
    3177             : };
    3178             : 
    3179             : template <typename MarkingState>
    3180             : class ToSpaceUpdatingItem : public UpdatingItem {
    3181             :  public:
    3182             :   explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
    3183             :                                MarkingState* marking_state)
    3184             :       : chunk_(chunk),
    3185             :         start_(start),
    3186             :         end_(end),
    3187       76223 :         marking_state_(marking_state) {}
    3188      152446 :   ~ToSpaceUpdatingItem() override = default;
    3189             : 
    3190       76221 :   void Process() override {
    3191      152442 :     if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
    3192             :       // New->new promoted pages contain garbage, so they require iteration
    3193             :       // using markbits.
    3194        1544 :       ProcessVisitLive();
    3195             :     } else {
    3196       74677 :       ProcessVisitAll();
    3197             :     }
    3198       76222 :   }
    3199             : 
    3200             :  private:
    3201       84003 :   void ProcessVisitAll() {
    3202      242684 :     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    3203             :                  "ToSpaceUpdatingItem::ProcessVisitAll");
    3204             :     PointersUpdatingVisitor visitor;
    3205    25155891 :     for (Address cur = start_; cur < end_;) {
    3206    25081213 :       HeapObject object = HeapObject::FromAddress(cur);
    3207             :       Map map = object->map();
    3208    25081213 :       int size = object->SizeFromMap(map);
    3209             :       object->IterateBodyFast(map, size, &visitor);
    3210    25071888 :       cur += size;
    3211             :     }
    3212       74678 :   }
    3213             : 
    3214        1541 :   void ProcessVisitLive() {
    3215        4626 :     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    3216             :                  "ToSpaceUpdatingItem::ProcessVisitLive");
    3217             :     // For young generation evacuations we want to visit grey objects; for
    3218             :     // full MC, we need to visit black objects.
    3219             :     PointersUpdatingVisitor visitor;
    3220     3038004 :     for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
    3221             :              chunk_, marking_state_->bitmap(chunk_))) {
    3222     3034919 :       object_and_size.first->IterateBodyFast(&visitor);
    3223             :     }
    3224        1544 :   }
    3225             : 
    3226             :   MemoryChunk* chunk_;
    3227             :   Address start_;
    3228             :   Address end_;
    3229             :   MarkingState* marking_state_;
    3230             : };
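                     : // [Editorial note] ProcessVisitAll above relies on to-space pages being
                     : // densely packed by bump-pointer allocation: each object starts exactly
                     : // where the previous one ends, so a linear walk only needs object sizes.
                     : // A minimal sketch of that walk, assuming hypothetical types:
                     : //
                     : //   for (Address cur = start; cur < end;) {
                     : //     HeapObject object = HeapObject::FromAddress(cur);
                     : //     visitor.Visit(object);
                     : //     cur += object.Size();  // next object begins immediately after
                     : //   }
                     : //
                     : // New->new promoted pages break this density invariant (they contain dead
                     : // objects), which is why ProcessVisitLive walks the marking bitmap instead.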
    3231             : 
    3232             : template <typename MarkingState>
    3233             : class RememberedSetUpdatingItem : public UpdatingItem {
    3234             :  public:
    3235             :   explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
    3236             :                                      MemoryChunk* chunk,
    3237             :                                      RememberedSetUpdatingMode updating_mode)
    3238             :       : heap_(heap),
    3239             :         marking_state_(marking_state),
    3240             :         chunk_(chunk),
    3241      314318 :         updating_mode_(updating_mode) {}
    3242      628636 :   ~RememberedSetUpdatingItem() override = default;
    3243             : 
    3244      313746 :   void Process() override {
    3245      941509 :     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    3246             :                  "RememberedSetUpdatingItem::Process");
    3247      313746 :     base::MutexGuard guard(chunk_->mutex());
    3248      313967 :     CodePageMemoryModificationScope memory_modification_scope(chunk_);
    3249      313911 :     UpdateUntypedPointers();
    3250      313955 :     UpdateTypedPointers();
    3251      313970 :   }
    3252             : 
    3253             :  private:
    3254             :   template <typename TSlot>
    3255    37405810 :   inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
    3256             :     static_assert(
    3257             :         std::is_same<TSlot, FullMaybeObjectSlot>::value ||
    3258             :             std::is_same<TSlot, MaybeObjectSlot>::value,
    3259             :         "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
    3260             :     using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    3261             :     HeapObject heap_object;
    3262    37405810 :     if (!(*slot).GetHeapObject(&heap_object)) {
    3263             :       return REMOVE_SLOT;
    3264             :     }
    3265    39327652 :     if (Heap::InFromPage(heap_object)) {
    3266             :       MapWord map_word = heap_object->map_word();
    3267    33187410 :       if (map_word.IsForwardingAddress()) {
    3268             :         HeapObjectReference::Update(THeapObjectSlot(slot),
    3269             :                                     map_word.ToForwardingAddress());
    3270             :       }
    3271             :       bool success = (*slot).GetHeapObject(&heap_object);
    3272             :       USE(success);
    3273             :       DCHECK(success);
    3274             :       // If the object was in from space before the callback and is in to
    3275             :       // space after it, the object is still live.
    3276             :       // Unfortunately, we do not know anything about the slot itself; it
    3277             :       // could be inside a just-freed free-space object.
    3278    33187410 :       if (Heap::InToPage(heap_object)) {
    3279             :         return KEEP_SLOT;
    3280             :       }
    3281     6140242 :     } else if (Heap::InToPage(heap_object)) {
    3282             :       // Slots can point to "to" space if the page has been moved, or if the
    3283             :       // slot has been recorded multiple times in the remembered set, or
    3284             :       // if the slot was already updated during old->old updating.
    3285             :       // If the page has been moved, check the markbits to determine the
    3286             :       // liveness of the slot. In the other cases, the slot can just be kept.
    3287     1334476 :       if (Page::FromHeapObject(heap_object)
    3288             :               ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
    3289             :         // IsBlackOrGrey is required because objects are marked as grey for
    3290             :         // the young generation collector while they are black for the full
    3291             :         // MC.
    3292     1334937 :         if (marking_state_->IsBlackOrGrey(heap_object)) {
    3293             :           return KEEP_SLOT;
    3294             :         } else {
    3295         667 :           return REMOVE_SLOT;
    3296             :         }
    3297             :       }
    3298             :       return KEEP_SLOT;
    3299             :     } else {
    3300             :       DCHECK(!Heap::InYoungGeneration(heap_object));
    3301             :     }
    3302             :     return REMOVE_SLOT;
    3303             :   }
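                     :   // [Editorial note] The forwarding check above relies on the map-word
                     :   // trick used during evacuation: the first word of a moved object is
                     :   // overwritten with the address of its new copy, distinguishable from a
                     :   // real map pointer by its tag. One possible encoding, sketched with
                     :   // hypothetical types (not V8's exact MapWord layout):
                     :   //
                     :   //   struct MapWordSketch {
                     :   //     uintptr_t value;  // tagged Map pointer or raw forwarding address
                     :   //     bool IsForwardingAddress() const { return (value & 1) == 0; }
                     :   //     uintptr_t ToForwardingAddress() const { return value; }
                     :   //   };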
    3304             : 
    3305      313723 :   void UpdateUntypedPointers() {
    3306      313723 :     if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
    3307      307192 :       RememberedSet<OLD_TO_NEW>::Iterate(
    3308             :           chunk_,
    3309    37413460 :           [this](MaybeObjectSlot slot) {
    3310             :             return CheckAndUpdateOldToNewSlot(slot);
    3311    37413460 :           },
    3312             :           SlotSet::PREFREE_EMPTY_BUCKETS);
    3313             :     }
    3314      627973 :     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
    3315      314005 :         (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
    3316       11384 :       InvalidatedSlotsFilter filter(chunk_);
    3317       11369 :       RememberedSet<OLD_TO_OLD>::Iterate(
    3318             :           chunk_,
    3319    57065096 :           [&filter](MaybeObjectSlot slot) {
    3320    28532548 :             if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
    3321    28374229 :             return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
    3322             :           },
    3323             :           SlotSet::PREFREE_EMPTY_BUCKETS);
    3324             :     }
    3325      627921 :     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
    3326      313940 :         chunk_->invalidated_slots() != nullptr) {
    3327             : #ifdef DEBUG
    3328             :       for (auto object_size : *chunk_->invalidated_slots()) {
    3329             :         HeapObject object = object_size.first;
    3330             :         int size = object_size.second;
    3331             :         DCHECK_LE(object->SizeFromMap(object->map()), size);
    3332             :       }
    3333             : #endif
    3334             :       // The invalidated slots are not needed after old-to-old slots were
    3335             :       // processed.
    3336         118 :       chunk_->ReleaseInvalidatedSlots();
    3337             :     }
    3338      313981 :   }
    3339             : 
    3340      313840 :   void UpdateTypedPointers() {
    3341      313840 :     if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
    3342             :         nullptr) {
    3343        3618 :       CHECK_NE(chunk_->owner(), heap_->map_space());
    3344             :       const auto check_and_update_old_to_new_slot_fn =
    3345       28183 :           [this](FullMaybeObjectSlot slot) {
    3346             :             return CheckAndUpdateOldToNewSlot(slot);
    3347       28183 :           };
    3348        1809 :       RememberedSet<OLD_TO_NEW>::IterateTyped(
    3349             :           chunk_, [=](SlotType slot_type, Address slot) {
    3350       28184 :             return UpdateTypedSlotHelper::UpdateTypedSlot(
    3351       28184 :                 heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
    3352       28184 :           });
    3353             :     }
    3354      627704 :     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
    3355      313838 :         (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
    3356             :          nullptr)) {
    3357        1620 :       CHECK_NE(chunk_->owner(), heap_->map_space());
    3358         810 :       RememberedSet<OLD_TO_OLD>::IterateTyped(
    3359             :           chunk_, [=](SlotType slot_type, Address slot) {
    3360             :             // Using UpdateStrongSlot is OK here, because there are no weak
    3361             :             // typed slots.
    3362      277542 :             return UpdateTypedSlotHelper::UpdateTypedSlot(
    3363      277542 :                 heap_, slot_type, slot,
    3364             :                 UpdateStrongSlot<AccessMode::NON_ATOMIC, FullMaybeObjectSlot>);
    3365      277542 :           });
    3366             :     }
    3367      313867 :   }
    3368             : 
    3369             :   Heap* heap_;
    3370             :   MarkingState* marking_state_;
    3371             :   MemoryChunk* chunk_;
    3372             :   RememberedSetUpdatingMode updating_mode_;
    3373             : };
    3374             : 
    3375       76223 : UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
    3376             :     MemoryChunk* chunk, Address start, Address end) {
    3377             :   return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
    3378      152446 :       chunk, start, end, non_atomic_marking_state());
    3379             : }
    3380             : 
    3381      314318 : UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
    3382             :     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
    3383             :   return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
    3384      628636 :       heap(), non_atomic_marking_state(), chunk, updating_mode);
    3385             : }
    3386             : 
    3387             : // Update array buffers on a page that has been evacuated by copying objects.
    3388             : // Target page exclusivity in old space is guaranteed by the fact that
    3389             : // evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
    3390             : // free list items of a given page. For new space, the tracker updates
    3391             : // under a lock.
    3392             : class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
    3393             :  public:
    3394             :   enum EvacuationState { kRegular, kAborted };
    3395             : 
    3396             :   explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
    3397       86684 :       : page_(page), state_(state) {}
    3398      173368 :   ~ArrayBufferTrackerUpdatingItem() override = default;
    3399             : 
    3400       86654 :   void Process() override {
    3401      259998 :     TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    3402             :                  "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
    3403             :                  state_);
    3404       86656 :     switch (state_) {
    3405             :       case EvacuationState::kRegular:
    3406             :         ArrayBufferTracker::ProcessBuffers(
    3407       86648 :             page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
    3408       86680 :         break;
    3409             :       case EvacuationState::kAborted:
    3410             :         ArrayBufferTracker::ProcessBuffers(
    3411           0 :             page_, ArrayBufferTracker::kUpdateForwardedKeepOthers);
    3412           0 :         break;
    3413             :     }
    3414       86677 :   }
    3415             : 
    3416             :  private:
    3417             :   Page* const page_;
    3418             :   const EvacuationState state_;
    3419             : };
    3420             : 
    3421       73955 : int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
    3422             :     ItemParallelJob* job) {
    3423             :   // Seed to-space pages.
    3424             :   const Address space_start = heap()->new_space()->first_allocatable_address();
    3425             :   const Address space_end = heap()->new_space()->top();
    3426             :   int pages = 0;
    3427      150178 :   for (Page* page : PageRange(space_start, space_end)) {
    3428             :     Address start =
    3429       76223 :         page->Contains(space_start) ? space_start : page->area_start();
    3430       76223 :     Address end = page->Contains(space_end) ? space_end : page->area_end();
    3431       76223 :     job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
    3432       76223 :     pages++;
    3433             :   }
    3434       73955 :   if (pages == 0) return 0;
    3435       73955 :   return NumberOfParallelToSpacePointerUpdateTasks(pages);
    3436             : }
    3437             : 
    3438             : template <typename IterateableSpace>
    3439      369775 : int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
    3440             :     ItemParallelJob* job, IterateableSpace* space,
    3441             :     RememberedSetUpdatingMode mode) {
    3442             :   int pages = 0;
    3443      907188 :   for (MemoryChunk* chunk : *space) {
    3444             :     const bool contains_old_to_old_slots =
    3445             :         chunk->slot_set<OLD_TO_OLD>() != nullptr ||
    3446     1063422 :         chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
    3447             :     const bool contains_old_to_new_slots =
    3448             :         chunk->slot_set<OLD_TO_NEW>() != nullptr ||
    3449      767020 :         chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
    3450             :     const bool contains_invalidated_slots =
    3451             :         chunk->invalidated_slots() != nullptr;
    3452      537413 :     if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
    3453             :         !contains_invalidated_slots)
    3454             :       continue;
    3455      314318 :     if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
    3456             :         contains_invalidated_slots) {
    3457      314318 :       job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
    3458      314318 :       pages++;
    3459             :     }
    3460             :   }
    3461      369775 :   return pages;
    3462             : }
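                     : // [Editorial note] The loop above schedules one updating item per chunk
                     : // that carries any remembered-set state. Condensed into a predicate over
                     : // a hypothetical plain struct (not V8's MemoryChunk):
                     : //
                     : //   struct ChunkSlots {
                     : //     bool old_to_old, old_to_new, invalidated;
                     : //   };
                     : //   bool NeedsUpdatingItem(const ChunkSlots& c, bool mode_all) {
                     : //     if (!c.old_to_old && !c.old_to_new && !c.invalidated) return false;
                     : //     // In a restricted mode, chunks with only old-to-old slots are
                     : //     // skipped; mode ALL takes every chunk that has any slots.
                     : //     return mode_all || c.old_to_new || c.invalidated;
                     : //   }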
    3463             : 
    3464       73955 : int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
    3465             :     ItemParallelJob* job) {
    3466             :   int pages = 0;
    3467      163451 :   for (Page* p : new_space_evacuation_pages_) {
    3468       89496 :     if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
    3469       87015 :       if (p->local_tracker() == nullptr) continue;
    3470             : 
    3471       85707 :       pages++;
    3472       85707 :       job->AddItem(new ArrayBufferTrackerUpdatingItem(
    3473             :           p, ArrayBufferTrackerUpdatingItem::kRegular));
    3474             :     }
    3475             :   }
    3476       73955 :   return pages;
    3477             : }
    3478             : 
    3479       73955 : int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
    3480             :     ItemParallelJob* job) {
    3481             :   int pages = 0;
    3482       80961 :   for (Page* p : old_space_evacuation_pages_) {
    3483       14012 :     if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
    3484             :         p->IsEvacuationCandidate()) {
    3485        6981 :       if (p->local_tracker() == nullptr) continue;
    3486             : 
    3487         977 :       pages++;
    3488         977 :       job->AddItem(new ArrayBufferTrackerUpdatingItem(
    3489             :           p, ArrayBufferTrackerUpdatingItem::kRegular));
    3490             :     }
    3491             :   }
    3492       73980 :   for (auto object_and_page : aborted_evacuation_candidates_) {
    3493             :     Page* p = object_and_page.second;
    3494          25 :     if (p->local_tracker() == nullptr) continue;
    3495             : 
    3496           0 :     pages++;
    3497           0 :     job->AddItem(new ArrayBufferTrackerUpdatingItem(
    3498             :         p, ArrayBufferTrackerUpdatingItem::kAborted));
    3499             :   }
    3500       73955 :   return pages;
    3501             : }
    3502             : 
    3503       73955 : void MarkCompactCollector::UpdatePointersAfterEvacuation() {
    3504      295820 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
    3505             : 
    3506             :   PointersUpdatingVisitor updating_visitor;
    3507             : 
    3508             :   {
    3509      295820 :     TRACE_GC(heap()->tracer(),
    3510             :              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
    3511       73955 :     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
    3512             :   }
    3513             : 
    3514             :   {
    3515      295820 :     TRACE_GC(heap()->tracer(),
    3516             :              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
    3517             :     ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
    3518      221865 :                                  &page_parallel_job_semaphore_);
    3519             : 
    3520             :     int remembered_set_pages = 0;
    3521       73955 :     remembered_set_pages += CollectRememberedSetUpdatingItems(
    3522       73955 :         &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
    3523             :     remembered_set_pages += CollectRememberedSetUpdatingItems(
    3524       73955 :         &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
    3525             :     remembered_set_pages += CollectRememberedSetUpdatingItems(
    3526       73955 :         &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
    3527             :     remembered_set_pages += CollectRememberedSetUpdatingItems(
    3528       73955 :         &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
    3529             :     const int remembered_set_tasks =
    3530             :         remembered_set_pages == 0
    3531             :             ? 0
    3532       73706 :             : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
    3533      147661 :                                                  old_to_new_slots_);
    3534       73955 :     const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
    3535             :     const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
    3536      470627 :     for (int i = 0; i < num_tasks; i++) {
    3537      198336 :       updating_job.AddTask(new PointersUpdatingTask(
    3538             :           isolate(),
    3539      198336 :           GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
    3540             :     }
    3541       73955 :     updating_job.Run();
    3542             :   }
    3543             : 
    3544             :   {
    3545             :     // - Update pointers in map space in a separate phase to avoid data races
    3546             :     //   with the Map->LayoutDescriptor edge.
    3547             :     // - Update array buffer trackers in the second phase to have access to
    3548             :     //   the byte length, which is potentially a HeapNumber.
    3549      295820 :     TRACE_GC(heap()->tracer(),
    3550             :              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
    3551             :     ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
    3552      147910 :                                  &page_parallel_job_semaphore_);
    3553             : 
    3554             :     int array_buffer_pages = 0;
    3555       73955 :     array_buffer_pages += CollectNewSpaceArrayBufferTrackerItems(&updating_job);
    3556       73955 :     array_buffer_pages += CollectOldSpaceArrayBufferTrackerItems(&updating_job);
    3557             : 
    3558             :     int remembered_set_pages = 0;
    3559             :     remembered_set_pages += CollectRememberedSetUpdatingItems(
    3560       73955 :         &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
    3561             :     const int remembered_set_tasks =
    3562             :         remembered_set_pages == 0
    3563             :             ? 0
    3564       73777 :             : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
    3565      147732 :                                                  old_to_new_slots_);
    3566             :     const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
    3567       73955 :     if (num_tasks > 0) {
    3568      318155 :       for (int i = 0; i < num_tasks; i++) {
    3569      122100 :         updating_job.AddTask(new PointersUpdatingTask(
    3570             :             isolate(),
    3571      122100 :             GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
    3572             :       }
    3573       73955 :       updating_job.Run();
    3574       73955 :       heap()->array_buffer_collector()->FreeAllocations();
    3575             :     }
    3576             :   }
    3577             : 
    3578             :   {
    3579      295820 :     TRACE_GC(heap()->tracer(),
    3580             :              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
    3581             :     // Update pointers from external string table.
    3582       73955 :     heap_->UpdateReferencesInExternalStringTable(
    3583       73955 :         &UpdateReferenceInExternalStringTableEntry);
    3584             : 
    3585       73955 :     EvacuationWeakObjectRetainer evacuation_object_retainer;
    3586       73955 :     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
    3587             :   }
    3588       73955 : }
    3589             : 
    3590          25 : void MarkCompactCollector::ReportAbortedEvacuationCandidate(
    3591             :     HeapObject failed_object, MemoryChunk* chunk) {
    3592          25 :   base::MutexGuard guard(&mutex_);
    3593             : 
    3594          25 :   aborted_evacuation_candidates_.push_back(
    3595          50 :       std::make_pair(failed_object, static_cast<Page*>(chunk)));
    3596          25 : }
    3597             : 
    3598       61254 : void MarkCompactCollector::PostProcessEvacuationCandidates() {
    3599       61279 :   for (auto object_and_page : aborted_evacuation_candidates_) {
    3600             :     HeapObject failed_object = object_and_page.first;
    3601             :     Page* page = object_and_page.second;
    3602             :     page->SetFlag(Page::COMPACTION_WAS_ABORTED);
    3603             :     // Aborted compaction page. We have to record slots here, since we
    3604             :     // might not have recorded them in the first place.
    3605             : 
    3606             :     // Remove outdated slots.
    3607          25 :     RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
    3608             :                                            failed_object->address(),
    3609          25 :                                            SlotSet::PREFREE_EMPTY_BUCKETS);
    3610             :     RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
    3611          25 :                                                 failed_object->address());
    3612             :     // Recompute live bytes.
    3613          25 :     LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
    3614             :     // Re-record slots.
    3615             :     EvacuateRecordOnlyVisitor record_visitor(heap());
    3616             :     LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
    3617             :                                                &record_visitor,
    3618          25 :                                                LiveObjectVisitor::kKeepMarking);
    3619             :     // Array buffers will be processed during pointer updating.
    3620             :   }
    3621             :   const int aborted_pages =
    3622       61254 :       static_cast<int>(aborted_evacuation_candidates_.size());
    3623             :   int aborted_pages_verified = 0;
    3624       68260 :   for (Page* p : old_space_evacuation_pages_) {
    3625        7006 :     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
    3626             :       // After clearing the evacuation candidate flag the page is again in a
    3627             :       // regular state.
    3628             :       p->ClearEvacuationCandidate();
    3629             :       aborted_pages_verified++;
    3630             :     } else {
    3631             :       DCHECK(p->IsEvacuationCandidate());
    3632             :       DCHECK(p->SweepingDone());
    3633             :       p->owner()->memory_chunk_list().Remove(p);
    3634             :     }
    3635             :   }
    3636             :   DCHECK_EQ(aborted_pages_verified, aborted_pages);
    3637       61254 :   if (FLAG_trace_evacuation && (aborted_pages > 0)) {
    3638             :     PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
    3639           0 :                  isolate()->time_millis_since_init(), aborted_pages);
    3640             :   }
    3641       61254 : }
    3642             : 
    3643       73955 : void MarkCompactCollector::ReleaseEvacuationCandidates() {
    3644       80961 :   for (Page* p : old_space_evacuation_pages_) {
    3645        7006 :     if (!p->IsEvacuationCandidate()) continue;
    3646             :     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    3647             :     non_atomic_marking_state()->SetLiveBytes(p, 0);
    3648        6981 :     CHECK(p->SweepingDone());
    3649        6981 :     space->ReleasePage(p);
    3650             :   }
    3651             :   old_space_evacuation_pages_.clear();
    3652       73955 :   compacting_ = false;
    3653       73955 : }
    3654             : 
    3655      221865 : void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
    3656      221865 :   space->ClearStats();
    3657             : 
    3658             :   int will_be_swept = 0;
    3659             :   bool unused_page_present = false;
    3660             : 
    3661             :   // The loop needs to support deletion, since pages with live bytes == 0
                     :   // may be released while iterating.
    3662      700456 :   for (auto it = space->begin(); it != space->end();) {
    3663             :     Page* p = *(it++);
    3664             :     DCHECK(p->SweepingDone());
    3665             : 
    3666      478591 :     if (p->IsEvacuationCandidate()) {
    3667             :       // Will be processed in Evacuate.
    3668             :       DCHECK(!evacuation_candidates_.empty());
    3669             :       continue;
    3670             :     }
    3671             : 
    3672      471585 :     if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
    3673             :       // We need to sweep the page to get it into an iterable state again. Note
    3674             :       // that this adds unusable memory to the free list, which is later
    3675             :       // dropped from the free list again. Since we only use the flag for
    3676             :       // testing, this is fine.
    3677             :       p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
    3678             :       sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
    3679             :                           Heap::ShouldZapGarbage()
    3680             :                               ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
    3681         177 :                               : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
    3682             :       space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
    3683             :       continue;
    3684             :     }
    3685             : 
    3686             :     // One unused page is kept; all further ones are released before sweeping.
    3687      471408 :     if (non_atomic_marking_state()->live_bytes(p) == 0) {
    3688        9455 :       if (unused_page_present) {
    3689             :         if (FLAG_gc_verbose) {
    3690             :           PrintIsolate(isolate(), "sweeping: released page: %p",
    3691             :                        static_cast<void*>(p));
    3692             :         }
    3693        2726 :         ArrayBufferTracker::FreeAll(p);
    3694             :         space->memory_chunk_list().Remove(p);
    3695        2726 :         space->ReleasePage(p);
    3696        2726 :         continue;
    3697             :       }
    3698             :       unused_page_present = true;
    3699             :     }
    3700             : 
    3701      468682 :     sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
    3702             :     will_be_swept++;
    3703             :   }
    3704             : 
    3705             :   if (FLAG_gc_verbose) {
    3706             :     PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
    3707             :                  space->name(), will_be_swept);
    3708             :   }
    3709      221865 : }
    3710             : 
    3711       73955 : void MarkCompactCollector::StartSweepSpaces() {
    3712      295820 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
    3713             : #ifdef DEBUG
    3714             :   state_ = SWEEP_SPACES;
    3715             : #endif
    3716             : 
    3717             :   {
    3718             :     {
    3719             :       GCTracer::Scope sweep_scope(heap()->tracer(),
    3720      147910 :                                   GCTracer::Scope::MC_SWEEP_OLD);
    3721       73955 :       StartSweepSpace(heap()->old_space());
    3722             :     }
    3723             :     {
    3724             :       GCTracer::Scope sweep_scope(heap()->tracer(),
    3725      147910 :                                   GCTracer::Scope::MC_SWEEP_CODE);
    3726       73955 :       StartSweepSpace(heap()->code_space());
    3727             :     }
    3728             :     {
    3729             :       GCTracer::Scope sweep_scope(heap()->tracer(),
    3730      147910 :                                   GCTracer::Scope::MC_SWEEP_MAP);
    3731       73955 :       StartSweepSpace(heap()->map_space());
    3732             :     }
    3733       73955 :     sweeper()->StartSweeping();
    3734             :   }
    3735       73955 : }
    3736             : 
    3737           0 : void MarkCompactCollector::MarkingWorklist::PrintWorklist(
    3738             :     const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
    3739             :   std::map<InstanceType, int> count;
    3740           0 :   int total_count = 0;
    3741           0 :   worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
    3742           0 :     ++total_count;
    3743           0 :     count[obj->map()->instance_type()]++;
    3744           0 :   });
    3745             :   std::vector<std::pair<int, InstanceType>> rank;
    3746           0 :   rank.reserve(count.size());
    3747           0 :   for (const auto& i : count) {
    3748           0 :     rank.emplace_back(i.second, i.first);
    3749             :   }
    3750             :   std::map<InstanceType, std::string> instance_type_name;
    3751             : #define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
    3752           0 :   INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
    3753             : #undef INSTANCE_TYPE_NAME
    3754             :   std::sort(rank.begin(), rank.end(),
    3755             :             std::greater<std::pair<int, InstanceType>>());
    3756           0 :   PrintF("Worklist %s: %d\n", worklist_name, total_count);
    3757           0 :   for (auto i : rank) {
    3758           0 :     PrintF("  [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
    3759             :   }
    3760           0 : }
    3761             : 
    3762             : #ifdef ENABLE_MINOR_MC
    3763             : 
    3764             : namespace {
    3765             : 
    3766             : #ifdef VERIFY_HEAP
    3767             : 
    3768             : class YoungGenerationMarkingVerifier : public MarkingVerifier {
    3769             :  public:
    3770             :   explicit YoungGenerationMarkingVerifier(Heap* heap)
    3771             :       : MarkingVerifier(heap),
    3772             :         marking_state_(
    3773             :             heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
    3774             : 
    3775             :   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
    3776             :       const MemoryChunk* chunk) override {
    3777             :     return marking_state_->bitmap(chunk);
    3778             :   }
    3779             : 
    3780             :   bool IsMarked(HeapObject object) override {
    3781             :     return marking_state_->IsGrey(object);
    3782             :   }
    3783             : 
    3784             :   bool IsBlackOrGrey(HeapObject object) override {
    3785             :     return marking_state_->IsBlackOrGrey(object);
    3786             :   }
    3787             : 
    3788             :   void Run() override {
    3789             :     VerifyRoots(VISIT_ALL_IN_SCAVENGE);
    3790             :     VerifyMarking(heap_->new_space());
    3791             :   }
    3792             : 
    3793             :  protected:
    3794             :   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
    3795             :     VerifyPointersImpl(start, end);
    3796             :   }
    3797             : 
    3798             :   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
    3799             :     VerifyPointersImpl(start, end);
    3800             :   }
    3801             : 
    3802             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    3803             :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    3804             :     VerifyHeapObjectImpl(target);
    3805             :   }
    3806             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3807             :     VerifyHeapObjectImpl(rinfo->target_object());
    3808             :   }
    3809             :   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
    3810             :     VerifyPointersImpl(start, end);
    3811             :   }
    3812             : 
    3813             :  private:
    3814             :   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
    3815             :     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object), IsMarked(heap_object));
    3816             :   }
    3817             : 
    3818             :   template <typename TSlot>
    3819             :   V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
    3820             :     for (TSlot slot = start; slot < end; ++slot) {
    3821             :       typename TSlot::TObject object = *slot;
    3822             :       HeapObject heap_object;
    3823             :       // Minor MC treats weak references as strong.
    3824             :       if (object.GetHeapObject(&heap_object)) {
    3825             :         VerifyHeapObjectImpl(heap_object);
    3826             :       }
    3827             :     }
    3828             :   }
    3829             : 
    3830             :   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
    3831             : };
    3832             : 
    3833             : class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
    3834             :  public:
    3835             :   explicit YoungGenerationEvacuationVerifier(Heap* heap)
    3836             :       : EvacuationVerifier(heap) {}
    3837             : 
    3838             :   void Run() override {
    3839             :     VerifyRoots(VISIT_ALL_IN_SCAVENGE);
    3840             :     VerifyEvacuation(heap_->new_space());
    3841             :     VerifyEvacuation(heap_->old_space());
    3842             :     VerifyEvacuation(heap_->code_space());
    3843             :     VerifyEvacuation(heap_->map_space());
    3844             :   }
    3845             : 
    3846             :  protected:
    3847             :   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
    3848             :     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
    3849             :                   Heap::InToPage(heap_object));
    3850             :   }
    3851             : 
    3852             :   template <typename TSlot>
    3853             :   void VerifyPointersImpl(TSlot start, TSlot end) {
    3854             :     for (TSlot current = start; current < end; ++current) {
    3855             :       typename TSlot::TObject object = *current;
    3856             :       HeapObject heap_object;
    3857             :       if (object.GetHeapObject(&heap_object)) {
    3858             :         VerifyHeapObjectImpl(heap_object);
    3859             :       }
    3860             :     }
    3861             :   }
    3862             : 
    3863             :   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
    3864             :     VerifyPointersImpl(start, end);
    3865             :   }
    3866             :   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
    3867             :     VerifyPointersImpl(start, end);
    3868             :   }
    3869             :   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    3870             :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    3871             :     VerifyHeapObjectImpl(target);
    3872             :   }
    3873             :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    3874             :     VerifyHeapObjectImpl(rinfo->target_object());
    3875             :   }
    3876             :   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
    3877             :     VerifyPointersImpl(start, end);
    3878             :   }
    3879             : };
    3880             : 
    3881             : #endif  // VERIFY_HEAP
    3882             : 
    3883           0 : bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
    3884             :   DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
    3885           0 :   return Heap::InYoungGeneration(*p) && !heap->minor_mark_compact_collector()
    3886             :                                              ->non_atomic_marking_state()
    3887           0 :                                              ->IsGrey(HeapObject::cast(*p));
    3888             : }
    3889             : 
    3890             : }  // namespace
    3891             : 
    3892      123038 : class YoungGenerationMarkingVisitor final
    3893             :     : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
    3894             :  public:
    3895             :   YoungGenerationMarkingVisitor(
    3896             :       MinorMarkCompactCollector::MarkingState* marking_state,
    3897             :       MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
    3898      123068 :       : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
    3899             : 
    3900           0 :   V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
    3901             :                                ObjectSlot end) final {
    3902             :     VisitPointersImpl(host, start, end);
    3903           0 :   }
    3904             : 
    3905           0 :   V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
    3906             :                                MaybeObjectSlot end) final {
    3907             :     VisitPointersImpl(host, start, end);
    3908           0 :   }
    3909             : 
    3910           0 :   V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
    3911             :     VisitPointerImpl(host, slot);
    3912           0 :   }
    3913             : 
    3914           0 :   V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot slot) final {
    3915             :     VisitPointerImpl(host, slot);
    3916           0 :   }
    3917             : 
    3918           0 :   V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    3919             :     // Code objects are not expected in new space.
    3920           0 :     UNREACHABLE();
    3921             :   }
    3922             : 
    3923           0 :   V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    3924             :     // Code objects are not expected in new space.
    3925           0 :     UNREACHABLE();
    3926             :   }
    3927             : 
    3928             :  private:
    3929             :   template <typename TSlot>
    3930             :   V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
    3931           0 :     for (TSlot slot = start; slot < end; ++slot) {
    3932             :       VisitPointer(host, slot);
    3933             :     }
    3934             :   }
    3935             : 
    3936             :   template <typename TSlot>
    3937             :   V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
    3938           0 :     typename TSlot::TObject target = *slot;
    3939           0 :     if (Heap::InYoungGeneration(target)) {
    3940             :       // Treat weak references as strong.
    3941             :       // TODO(marja): Proper weakness handling for minor-mcs.
    3942           0 :       HeapObject target_object = target.GetHeapObject();
    3943           0 :       MarkObjectViaMarkingWorklist(target_object);
    3944             :     }
    3945             :   }
    3946             : 
    3947           0 :   inline void MarkObjectViaMarkingWorklist(HeapObject object) {
    3948           0 :     if (marking_state_->WhiteToGrey(object)) {
    3949             :       // Marking deque overflow is unsupported for the young generation.
    3950           0 :       CHECK(worklist_.Push(object));
    3951             :     }
    3952           0 :   }
    3953             : 
    3954             :   MinorMarkCompactCollector::MarkingWorklist::View worklist_;
    3955             :   MinorMarkCompactCollector::MarkingState* marking_state_;
    3956             : };
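
The visitor above captures the minor-MC push discipline: an object is enqueued at most once, because only the caller that wins the WhiteToGrey transition gets to push it. A minimal single-threaded sketch of that pattern, using invented names rather than V8 API (ToyMarker is not part of the source under test):

    #include <deque>
    #include <unordered_set>

    struct ToyMarker {
      std::unordered_set<const void*> marked;  // stands in for the mark bitmap
      std::deque<const void*> worklist;        // stands in for MarkingWorklist

      void MarkObject(const void* object) {
        // "WhiteToGrey": insert() succeeds only for the first caller.
        if (marked.insert(object).second) {
          worklist.push_back(object);  // overflow is unsupported, as above
        }
      }
    };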
    3957             : 
    3958       61534 : void MinorMarkCompactCollector::SetUp() {}
    3959             : 
    3960       61518 : void MinorMarkCompactCollector::TearDown() {}
    3961             : 
    3962       61534 : MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
    3963             :     : MarkCompactCollectorBase(heap),
    3964             :       worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
    3965             :       main_marking_visitor_(new YoungGenerationMarkingVisitor(
    3966       61534 :           marking_state(), worklist_, kMainMarker)),
    3967      246136 :       page_parallel_job_semaphore_(0) {
    3968             :   static_assert(
    3969             :       kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
    3970             :       "more marker tasks than marking deque can handle");
    3971       61534 : }
    3972             : 
    3973      246074 : MinorMarkCompactCollector::~MinorMarkCompactCollector() {
    3974       61518 :   delete worklist_;
    3975       61519 :   delete main_marking_visitor_;
    3976      123037 : }
    3977             : 
    3978           0 : int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
    3979             :   DCHECK_GT(pages, 0);
    3980           0 :   if (!FLAG_minor_mc_parallel_marking) return 1;
    3981             :   // Pages are not private to markers but we can still use them to estimate the
    3982             :   // amount of marking that is required.
    3983             :   const int kPagesPerTask = 2;
    3984           0 :   const int wanted_tasks = Max(1, pages / kPagesPerTask);
    3985           0 :   return Min(NumberOfAvailableCores(),
    3986             :              Min(wanted_tasks,
    3987           0 :                  MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
    3988             : }
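
The heuristic above never returns fewer than one task and never more than the available cores or the worklist's task limit allow. A standalone sketch of the same arithmetic, where EstimateMarkingTasks, cores, and max_tasks are invented stand-ins for the V8 names:

    #include <algorithm>

    static int EstimateMarkingTasks(int pages, int cores, int max_tasks) {
      const int kPagesPerTask = 2;  // same constant as in the function above
      const int wanted_tasks = std::max(1, pages / kPagesPerTask);
      return std::min(cores, std::min(wanted_tasks, max_tasks));
    }

For example, pages = 7 gives wanted_tasks = 3; with cores = 4 and max_tasks = 8 the result is 3, while a single dirty page still yields one task.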
    3989             : 
    3990       73955 : void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
    3991       73955 :   for (Page* p : sweep_to_iterate_pages_) {
    3992           0 :     if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
    3993             :       p->ClearFlag(Page::SWEEP_TO_ITERATE);
    3994           0 :       non_atomic_marking_state()->ClearLiveness(p);
    3995             :     }
    3996             :   }
    3997             :   sweep_to_iterate_pages_.clear();
    3998       73955 : }
    3999             : 
    4000           0 : class YoungGenerationMigrationObserver final : public MigrationObserver {
    4001             :  public:
    4002             :   YoungGenerationMigrationObserver(Heap* heap,
    4003             :                                    MarkCompactCollector* mark_compact_collector)
    4004             :       : MigrationObserver(heap),
    4005           0 :         mark_compact_collector_(mark_compact_collector) {}
    4006             : 
    4007           0 :   inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
    4008             :                    int size) final {
    4009             :     // Migrate color to old generation marking in case the object survived young
    4010             :     // generation garbage collection.
    4011           0 :     if (heap_->incremental_marking()->IsMarking()) {
    4012             :       DCHECK(
    4013             :           heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
    4014             :       heap_->incremental_marking()->TransferColor(src, dst);
    4015             :     }
    4016           0 :   }
    4017             : 
    4018             :  protected:
    4019             :   base::Mutex mutex_;
    4020             :   MarkCompactCollector* mark_compact_collector_;
    4021             : };
    4022             : 
    4023           0 : class YoungGenerationRecordMigratedSlotVisitor final
    4024             :     : public RecordMigratedSlotVisitor {
    4025             :  public:
    4026             :   explicit YoungGenerationRecordMigratedSlotVisitor(
    4027             :       MarkCompactCollector* collector)
    4028           0 :       : RecordMigratedSlotVisitor(collector) {}
    4029             : 
    4030           0 :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
    4031           0 :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    4032           0 :     UNREACHABLE();
    4033             :   }
    4034             : 
    4035             :  private:
    4036             :   // Only record slots for host objects that are considered live by the full
    4037             :   // collector.
    4038           0 :   inline bool IsLive(HeapObject object) {
    4039           0 :     return collector_->non_atomic_marking_state()->IsBlack(object);
    4040             :   }
    4041             : 
    4042           0 :   inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
    4043             :                                  Address slot) final {
    4044           0 :     if (value->IsStrongOrWeak()) {
    4045             :       MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
    4046           0 :       if (p->InYoungGeneration()) {
    4047             :         DCHECK_IMPLIES(
    4048             :             p->IsToPage(),
    4049             :             p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
    4050             :         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
    4051           0 :             MemoryChunk::FromHeapObject(host), slot);
    4052           0 :       } else if (p->IsEvacuationCandidate() && IsLive(host)) {
    4053             :         RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
    4054           0 :             MemoryChunk::FromHeapObject(host), slot);
    4055             :       }
    4056             :     }
    4057           0 :   }
    4058             : };
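
RecordMigratedSlot above encodes the remembered-set policy for migrated values: young-generation targets stay reachable through OLD_TO_NEW, while targets on evacuation candidates are tracked in OLD_TO_OLD only when the host itself is live. A hedged sketch of just that decision, with the V8 predicates replaced by caller-supplied booleans (ClassifyMigratedSlot is an invented name):

    enum class RememberedSetChoice { kNone, kOldToNew, kOldToOld };

    static RememberedSetChoice ClassifyMigratedSlot(
        bool value_is_strong_or_weak, bool value_in_young_generation,
        bool value_on_evacuation_candidate, bool host_is_live) {
      if (!value_is_strong_or_weak) return RememberedSetChoice::kNone;
      if (value_in_young_generation) return RememberedSetChoice::kOldToNew;
      if (value_on_evacuation_candidate && host_is_live)
        return RememberedSetChoice::kOldToOld;
      return RememberedSetChoice::kNone;
    }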
    4059             : 
    4060           0 : void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
    4061           0 :   TRACE_GC(heap()->tracer(),
    4062             :            GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
    4063             : 
    4064             :   PointersUpdatingVisitor updating_visitor;
    4065             :   ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
    4066           0 :                                &page_parallel_job_semaphore_);
    4067             : 
    4068           0 :   CollectNewSpaceArrayBufferTrackerItems(&updating_job);
    4069             :   // Create batches of global handles.
    4070           0 :   const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
    4071             :   int remembered_set_pages = 0;
    4072             :   remembered_set_pages += CollectRememberedSetUpdatingItems(
    4073             :       &updating_job, heap()->old_space(),
    4074           0 :       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
    4075             :   remembered_set_pages += CollectRememberedSetUpdatingItems(
    4076             :       &updating_job, heap()->code_space(),
    4077           0 :       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
    4078             :   remembered_set_pages += CollectRememberedSetUpdatingItems(
    4079             :       &updating_job, heap()->map_space(),
    4080           0 :       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
    4081             :   remembered_set_pages += CollectRememberedSetUpdatingItems(
    4082             :       &updating_job, heap()->lo_space(),
    4083           0 :       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
    4084             :   remembered_set_pages += CollectRememberedSetUpdatingItems(
    4085             :       &updating_job, heap()->code_lo_space(),
    4086           0 :       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
    4087             :   const int remembered_set_tasks =
    4088             :       remembered_set_pages == 0 ? 0
    4089           0 :                                 : NumberOfParallelPointerUpdateTasks(
    4090           0 :                                       remembered_set_pages, old_to_new_slots_);
    4091             :   const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
    4092           0 :   for (int i = 0; i < num_tasks; i++) {
    4093           0 :     updating_job.AddTask(new PointersUpdatingTask(
    4094             :         isolate(), GCTracer::BackgroundScope::
    4095           0 :                        MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
    4096             :   }
    4097             : 
    4098             :   {
    4099           0 :     TRACE_GC(heap()->tracer(),
    4100             :              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
    4101           0 :     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
    4102             :   }
    4103             :   {
    4104           0 :     TRACE_GC(heap()->tracer(),
    4105             :              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
    4106           0 :     updating_job.Run();
    4107           0 :     heap()->array_buffer_collector()->FreeAllocations();
    4108             :   }
    4109             : 
    4110             :   {
    4111           0 :     TRACE_GC(heap()->tracer(),
    4112             :              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
    4113             : 
    4114           0 :     EvacuationWeakObjectRetainer evacuation_object_retainer;
    4115           0 :     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
    4116             : 
    4117             :     // Update pointers from external string table.
    4118             :     heap()->UpdateYoungReferencesInExternalStringTable(
    4119           0 :         &UpdateReferenceInExternalStringTableEntry);
    4120             :   }
    4121           0 : }
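
A worked example for the task sizing above (the numbers are invented for illustration): with to_space_tasks = 2 and nine dirty remembered-set pages mapping to, say, remembered_set_tasks = 3, num_tasks = Max(2, 3) = 3, so three PointersUpdatingTasks are added before the job runs.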
    4122             : 
    4123           0 : class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
    4124             :  public:
    4125             :   explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
    4126           0 :       : collector_(collector) {}
    4127             : 
    4128           0 :   void VisitRootPointer(Root root, const char* description,
    4129             :                         FullObjectSlot p) final {
    4130             :     MarkObjectByPointer(p);
    4131           0 :   }
    4132             : 
    4133           0 :   void VisitRootPointers(Root root, const char* description,
    4134             :                          FullObjectSlot start, FullObjectSlot end) final {
    4135           0 :     for (FullObjectSlot p = start; p < end; ++p) {
    4136             :       MarkObjectByPointer(p);
    4137             :     }
    4138           0 :   }
    4139             : 
    4140             :  private:
    4141             :   V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
    4142           0 :     if (!(*p)->IsHeapObject()) return;
    4143           0 :     collector_->MarkRootObject(HeapObject::cast(*p));
    4144             :   }
    4145             :   MinorMarkCompactCollector* const collector_;
    4146             : };
    4147             : 
    4148           0 : void MinorMarkCompactCollector::CollectGarbage() {
    4149             :   {
    4150           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
    4151           0 :     heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    4152           0 :     CleanupSweepToIteratePages();
    4153             :   }
    4154             : 
    4155           0 :   MarkLiveObjects();
    4156           0 :   ClearNonLiveReferences();
    4157             : #ifdef VERIFY_HEAP
    4158             :   if (FLAG_verify_heap) {
    4159             :     YoungGenerationMarkingVerifier verifier(heap());
    4160             :     verifier.Run();
    4161             :   }
    4162             : #endif  // VERIFY_HEAP
    4163             : 
    4164           0 :   Evacuate();
    4165             : #ifdef VERIFY_HEAP
    4166             :   if (FLAG_verify_heap) {
    4167             :     YoungGenerationEvacuationVerifier verifier(heap());
    4168             :     verifier.Run();
    4169             :   }
    4170             : #endif  // VERIFY_HEAP
    4171             : 
    4172             :   {
    4173           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
    4174           0 :     heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
    4175             :   }
    4176             : 
    4177             :   {
    4178           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
    4179           0 :     for (Page* p :
    4180             :          PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
    4181             :       DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
    4182           0 :       non_atomic_marking_state()->ClearLiveness(p);
    4183           0 :       if (FLAG_concurrent_marking) {
    4184             :         // Ensure that the concurrent marker does not track pages that are
    4185             :         // going to be unmapped.
    4186           0 :         heap()->concurrent_marking()->ClearMemoryChunkData(p);
    4187             :       }
    4188             :     }
    4189             :     // Since we promote all surviving large objects immediately, all remaining
    4190             :     // large objects must be dead.
    4191             :     // TODO(ulan): Don't free all as soon as we have an intermediate generation.
    4192           0 :     heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
    4193             :   }
    4194             : 
    4195             :   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
    4196           0 :       heap(), [](MemoryChunk* chunk) {
    4197           0 :         if (chunk->SweepingDone()) {
    4198           0 :           RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
    4199             :         } else {
    4200           0 :           RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
    4201             :         }
    4202           0 :       });
    4203             : 
    4204             :   heap()->account_external_memory_concurrently_freed();
    4205           0 : }
    4206             : 
    4207           0 : void MinorMarkCompactCollector::MakeIterable(
    4208             :     Page* p, MarkingTreatmentMode marking_mode,
    4209             :     FreeSpaceTreatmentMode free_space_mode) {
    4210           0 :   CHECK(!p->IsLargePage());
    4211             :   // We have to clear the full collector's mark bits for the areas that we
    4212             :   // remove here.
    4213             :   MarkCompactCollector* full_collector = heap()->mark_compact_collector();
    4214             :   Address free_start = p->area_start();
    4215             : 
    4216           0 :   for (auto object_and_size :
    4217             :        LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
    4218           0 :     HeapObject const object = object_and_size.first;
    4219             :     DCHECK(non_atomic_marking_state()->IsGrey(object));
    4220             :     Address free_end = object->address();
    4221           0 :     if (free_end != free_start) {
    4222           0 :       CHECK_GT(free_end, free_start);
    4223           0 :       size_t size = static_cast<size_t>(free_end - free_start);
    4224           0 :       full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
    4225             :           p->AddressToMarkbitIndex(free_start),
    4226           0 :           p->AddressToMarkbitIndex(free_end));
    4227           0 :       if (free_space_mode == ZAP_FREE_SPACE) {
    4228             :         ZapCode(free_start, size);
    4229             :       }
    4230             :       p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
    4231           0 :                                       ClearRecordedSlots::kNo);
    4232             :     }
    4233           0 :     Map map = object->synchronized_map();
    4234           0 :     int size = object->SizeFromMap(map);
    4235           0 :     free_start = free_end + size;
    4236             :   }
    4237             : 
    4238           0 :   if (free_start != p->area_end()) {
    4239           0 :     CHECK_GT(p->area_end(), free_start);
    4240           0 :     size_t size = static_cast<size_t>(p->area_end() - free_start);
    4241           0 :     full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
    4242             :         p->AddressToMarkbitIndex(free_start),
    4243           0 :         p->AddressToMarkbitIndex(p->area_end()));
    4244           0 :     if (free_space_mode == ZAP_FREE_SPACE) {
    4245             :       ZapCode(free_start, size);
    4246             :     }
    4247             :     p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
    4248           0 :                                     ClearRecordedSlots::kNo);
    4249             :   }
    4250             : 
    4251           0 :   if (marking_mode == MarkingTreatmentMode::CLEAR) {
    4252           0 :     non_atomic_marking_state()->ClearLiveness(p);
    4253             :     p->ClearFlag(Page::SWEEP_TO_ITERATE);
    4254             :   }
    4255           0 : }
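
MakeIterable above walks the grey objects of a page in address order and turns every gap between them, plus the tail of the page, into a filler object so a linear heap walk never reads stale memory. A minimal sketch of that gap-filling loop under invented types (LiveRange and create_filler are not V8 API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct LiveRange { uintptr_t begin; uintptr_t end; };  // [begin, end)

    static void FillGaps(uintptr_t area_start, uintptr_t area_end,
                         const std::vector<LiveRange>& live,  // sorted by begin
                         void (*create_filler)(uintptr_t start, size_t size)) {
      uintptr_t free_start = area_start;
      for (const LiveRange& range : live) {
        if (range.begin > free_start) {
          create_filler(free_start, range.begin - free_start);
        }
        free_start = range.end;  // the next gap starts after this live object
      }
      if (free_start < area_end) {
        create_filler(free_start, area_end - free_start);
      }
    }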
    4256             : 
    4257             : namespace {
    4258             : 
    4259             : // Helper class for pruning the string table.
    4260           0 : class YoungGenerationExternalStringTableCleaner : public RootVisitor {
    4261             :  public:
    4262             :   YoungGenerationExternalStringTableCleaner(
    4263             :       MinorMarkCompactCollector* collector)
    4264             :       : heap_(collector->heap()),
    4265           0 :         marking_state_(collector->non_atomic_marking_state()) {}
    4266             : 
    4267           0 :   void VisitRootPointers(Root root, const char* description,
    4268             :                          FullObjectSlot start, FullObjectSlot end) override {
    4269             :     DCHECK_EQ(static_cast<int>(root),
    4270             :               static_cast<int>(Root::kExternalStringsTable));
    4271             :     // Visit all HeapObject pointers in [start, end).
    4272           0 :     for (FullObjectSlot p = start; p < end; ++p) {
    4273             :       Object o = *p;
    4274           0 :       if (o->IsHeapObject()) {
    4275             :         HeapObject heap_object = HeapObject::cast(o);
    4276           0 :         if (marking_state_->IsWhite(heap_object)) {
    4277           0 :           if (o->IsExternalString()) {
    4278           0 :             heap_->FinalizeExternalString(String::cast(*p));
    4279             :           } else {
    4280             :             // The original external string may have been internalized.
    4281             :             DCHECK(o->IsThinString());
    4282             :           }
    4283             :           // Set the entry to the_hole_value (as deleted).
    4284           0 :           p.store(ReadOnlyRoots(heap_).the_hole_value());
    4285             :         }
    4286             :       }
    4287             :     }
    4288           0 :   }
    4289             : 
    4290             :  private:
    4291             :   Heap* heap_;
    4292             :   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
    4293             : };
    4294             : 
    4295             : // Marked young generation objects and all old generation objects will be
    4296             : // retained.
    4297           0 : class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
    4298             :  public:
    4299             :   explicit MinorMarkCompactWeakObjectRetainer(
    4300             :       MinorMarkCompactCollector* collector)
    4301           0 :       : marking_state_(collector->non_atomic_marking_state()) {}
    4302             : 
    4303           0 :   Object RetainAs(Object object) override {
    4304             :     HeapObject heap_object = HeapObject::cast(object);
    4305           0 :     if (!Heap::InYoungGeneration(heap_object)) return object;
    4306             : 
    4307             :     // Young generation marking only marks to grey instead of black.
    4308             :     DCHECK(!marking_state_->IsBlack(heap_object));
    4309           0 :     if (marking_state_->IsGrey(heap_object)) {
    4310           0 :       return object;
    4311             :     }
    4312           0 :     return Object();
    4313             :   }
    4314             : 
    4315             :  private:
    4316             :   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
    4317             : };
    4318             : 
    4319             : }  // namespace
    4320             : 
    4321           0 : void MinorMarkCompactCollector::ClearNonLiveReferences() {
    4322           0 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
    4323             : 
    4324             :   {
    4325           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
    4326             :     // Internalized strings are always stored in old space, so there is no need
    4327             :     // to clean them here.
    4328             :     YoungGenerationExternalStringTableCleaner external_visitor(this);
    4329           0 :     heap()->external_string_table_.IterateYoung(&external_visitor);
    4330           0 :     heap()->external_string_table_.CleanUpYoung();
    4331             :   }
    4332             : 
    4333             :   {
    4334           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
    4335             :     // Process the weak references.
    4336             :     MinorMarkCompactWeakObjectRetainer retainer(this);
    4337           0 :     heap()->ProcessYoungWeakReferences(&retainer);
    4338             :   }
    4339           0 : }
    4340             : 
    4341           0 : void MinorMarkCompactCollector::EvacuatePrologue() {
    4342             :   NewSpace* new_space = heap()->new_space();
    4343             :   // Append the list of new space pages to be processed.
    4344           0 :   for (Page* p :
    4345           0 :        PageRange(new_space->first_allocatable_address(), new_space->top())) {
    4346           0 :     new_space_evacuation_pages_.push_back(p);
    4347             :   }
    4348           0 :   new_space->Flip();
    4349           0 :   new_space->ResetLinearAllocationArea();
    4350             : 
    4351           0 :   heap()->new_lo_space()->Flip();
    4352             :   heap()->new_lo_space()->ResetPendingObject();
    4353           0 : }
    4354             : 
    4355           0 : void MinorMarkCompactCollector::EvacuateEpilogue() {
    4356             :   heap()->new_space()->set_age_mark(heap()->new_space()->top());
    4357             :   // Give pages that are queued to be freed back to the OS.
    4358           0 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    4359           0 : }
    4360             : 
    4361           0 : UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
    4362             :     MemoryChunk* chunk, Address start, Address end) {
    4363             :   return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
    4364           0 :       chunk, start, end, non_atomic_marking_state());
    4365             : }
    4366             : 
    4367           0 : UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
    4368             :     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
    4369             :   return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
    4370           0 :       heap(), non_atomic_marking_state(), chunk, updating_mode);
    4371             : }
    4372             : 
    4373             : class MarkingItem;
    4374             : class PageMarkingItem;
    4375             : class RootMarkingItem;
    4376             : class YoungGenerationMarkingTask;
    4377             : 
    4378           0 : class MarkingItem : public ItemParallelJob::Item {
    4379             :  public:
    4380           0 :   ~MarkingItem() override = default;
    4381             :   virtual void Process(YoungGenerationMarkingTask* task) = 0;
    4382             : };
    4383             : 
    4384           0 : class YoungGenerationMarkingTask : public ItemParallelJob::Task {
    4385             :  public:
    4386           0 :   YoungGenerationMarkingTask(
    4387             :       Isolate* isolate, MinorMarkCompactCollector* collector,
    4388             :       MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
    4389             :       : ItemParallelJob::Task(isolate),
    4390             :         collector_(collector),
    4391             :         marking_worklist_(global_worklist, task_id),
    4392             :         marking_state_(collector->marking_state()),
    4393           0 :         visitor_(marking_state_, global_worklist, task_id) {
    4394           0 :     local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
    4395             :                               Page::kPageSize);
    4396           0 :   }
    4397             : 
    4398           0 :   void RunInParallel() override {
    4399           0 :     TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
    4400             :                         GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
    4401           0 :     double marking_time = 0.0;
    4402             :     {
    4403             :       TimedScope scope(&marking_time);
    4404             :       MarkingItem* item = nullptr;
    4405           0 :       while ((item = GetItem<MarkingItem>()) != nullptr) {
    4406           0 :         item->Process(this);
    4407           0 :         item->MarkFinished();
    4408           0 :         EmptyLocalMarkingWorklist();
    4409             :       }
    4410           0 :       EmptyMarkingWorklist();
    4411             :       DCHECK(marking_worklist_.IsLocalEmpty());
    4412           0 :       FlushLiveBytes();
    4413             :     }
    4414           0 :     if (FLAG_trace_minor_mc_parallel_marking) {
    4415           0 :       PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
    4416           0 :                    static_cast<void*>(this), marking_time);
    4417             :     }
    4418           0 :   }
    4419             : 
    4420           0 :   void MarkObject(Object object) {
    4421           0 :     if (!Heap::InYoungGeneration(object)) return;
    4422             :     HeapObject heap_object = HeapObject::cast(object);
    4423           0 :     if (marking_state_->WhiteToGrey(heap_object)) {
    4424             :       const int size = visitor_.Visit(heap_object);
    4425           0 :       IncrementLiveBytes(heap_object, size);
    4426             :     }
    4427             :   }
    4428             : 
    4429             :  private:
    4430           0 :   void EmptyLocalMarkingWorklist() {
    4431           0 :     HeapObject object;
    4432           0 :     while (marking_worklist_.Pop(&object)) {
    4433             :       const int size = visitor_.Visit(object);
    4434           0 :       IncrementLiveBytes(object, size);
    4435             :     }
    4436           0 :   }
    4437             : 
    4438           0 :   void EmptyMarkingWorklist() {
    4439           0 :     HeapObject object;
    4440           0 :     while (marking_worklist_.Pop(&object)) {
    4441             :       const int size = visitor_.Visit(object);
    4442           0 :       IncrementLiveBytes(object, size);
    4443             :     }
    4444           0 :   }
    4445             : 
    4446             :   void IncrementLiveBytes(HeapObject object, intptr_t bytes) {
    4447           0 :     local_live_bytes_[Page::FromHeapObject(object)] += bytes;
    4448             :   }
    4449             : 
    4450           0 :   void FlushLiveBytes() {
    4451           0 :     for (auto pair : local_live_bytes_) {
    4452             :       marking_state_->IncrementLiveBytes(pair.first, pair.second);
    4453             :     }
    4454           0 :   }
    4455             : 
    4456             :   MinorMarkCompactCollector* collector_;
    4457             :   MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
    4458             :   MinorMarkCompactCollector::MarkingState* marking_state_;
    4459             :   YoungGenerationMarkingVisitor visitor_;
    4460             :   std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
    4461             : };
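
The local_live_bytes_ map above is a contention-avoidance pattern: each task counts live bytes into private storage while draining items and publishes the totals once in FlushLiveBytes(). A generic sketch under that assumption (LocalCounter is an invented helper, not V8 API):

    #include <cstdint>
    #include <unordered_map>

    template <typename Key, typename SharedState>
    struct LocalCounter {
      std::unordered_map<Key, intptr_t> local;

      void Add(Key key, intptr_t bytes) { local[key] += bytes; }

      // One synchronized publication per task instead of one per object.
      void FlushTo(SharedState* shared) {
        for (const auto& pair : local) {
          shared->Increment(pair.first, pair.second);
        }
      }
    };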
    4462             : 
    4463             : class PageMarkingItem : public MarkingItem {
    4464             :  public:
    4465             :   explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
    4466           0 :       : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
    4467           0 :   ~PageMarkingItem() override { *global_slots_ = *global_slots_ + slots_; }
    4468             : 
    4469           0 :   void Process(YoungGenerationMarkingTask* task) override {
    4470           0 :     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    4471             :                  "PageMarkingItem::Process");
    4472           0 :     base::MutexGuard guard(chunk_->mutex());
    4473             :     MarkUntypedPointers(task);
    4474             :     MarkTypedPointers(task);
    4475           0 :   }
    4476             : 
    4477             :  private:
    4478             :   inline Heap* heap() { return chunk_->heap(); }
    4479             : 
    4480             :   void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
    4481           0 :     RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
    4482           0 :                                        [this, task](MaybeObjectSlot slot) {
    4483             :                                          return CheckAndMarkObject(task, slot);
    4484           0 :                                        },
    4485           0 :                                        SlotSet::PREFREE_EMPTY_BUCKETS);
    4486             :   }
    4487             : 
    4488             :   void MarkTypedPointers(YoungGenerationMarkingTask* task) {
    4489           0 :     RememberedSet<OLD_TO_NEW>::IterateTyped(
    4490             :         chunk_, [=](SlotType slot_type, Address slot) {
    4491           0 :           return UpdateTypedSlotHelper::UpdateTypedSlot(
    4492           0 :               heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
    4493             :                 return CheckAndMarkObject(task, slot);
    4494           0 :               });
    4495           0 :         });
    4496             :   }
    4497             : 
    4498             :   template <typename TSlot>
    4499             :   V8_INLINE SlotCallbackResult
    4500             :   CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
    4501             :     static_assert(
    4502             :         std::is_same<TSlot, FullMaybeObjectSlot>::value ||
    4503             :             std::is_same<TSlot, MaybeObjectSlot>::value,
    4504             :         "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
    4505             :     MaybeObject object = *slot;
    4506           0 :     if (Heap::InYoungGeneration(object)) {
    4507             :       // Marking happens before flipping the young generation, so the object
    4508             :       // has to be in a to-space page.
    4509             :       DCHECK(Heap::InToPage(object));
    4510           0 :       HeapObject heap_object;
    4511             :       bool success = object.GetHeapObject(&heap_object);
    4512             :       USE(success);
    4513             :       DCHECK(success);
    4514           0 :       task->MarkObject(heap_object);
    4515           0 :       slots_++;
    4516             :       return KEEP_SLOT;
    4517             :     }
    4518             :     return REMOVE_SLOT;
    4519             :   }
    4520             : 
    4521             :   MemoryChunk* chunk_;
    4522             :   std::atomic<int>* global_slots_;
    4523             :   int slots_;
    4524             : };
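
A note on the callback protocol above: KEEP_SLOT retains the OLD_TO_NEW entry because its target is still in the young generation and may move again, while REMOVE_SLOT prunes entries whose targets have left the young generation. The per-item slots_ tally is folded into global_slots_ in the destructor and later sizes the pointer-updating tasks.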
    4525             : 
    4526           0 : void MinorMarkCompactCollector::MarkRootSetInParallel(
    4527             :     RootMarkingVisitor* root_visitor) {
    4528             :   std::atomic<int> slots;
    4529             :   {
    4530             :     ItemParallelJob job(isolate()->cancelable_task_manager(),
    4531           0 :                         &page_parallel_job_semaphore_);
    4532             : 
    4533             :     // Seed the root set (roots + old->new set).
    4534             :     {
    4535           0 :       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
    4536             :       isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
    4537           0 :           &JSObject::IsUnmodifiedApiObject);
    4538           0 :       heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
    4539             :       // Create items for each page.
    4540             :       RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
    4541           0 :           heap(), [&job, &slots](MemoryChunk* chunk) {
    4542           0 :             job.AddItem(new PageMarkingItem(chunk, &slots));
    4543           0 :           });
    4544             :     }
    4545             : 
    4546             :     // Add tasks and run in parallel.
    4547             :     {
    4548           0 :       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
    4549             :       const int new_space_pages =
    4550           0 :           static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
    4551           0 :       const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
    4552           0 :       for (int i = 0; i < num_tasks; i++) {
    4553           0 :         job.AddTask(
    4554           0 :             new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
    4555             :       }
    4556           0 :       job.Run();
    4557             :       DCHECK(worklist()->IsEmpty());
    4558             :     }
    4559             :   }
    4560           0 :   old_to_new_slots_ = slots;
    4561           0 : }
    4562             : 
    4563           0 : void MinorMarkCompactCollector::MarkLiveObjects() {
    4564           0 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
    4565             : 
    4566             :   PostponeInterruptsScope postpone(isolate());
    4567             : 
    4568             :   RootMarkingVisitor root_visitor(this);
    4569             : 
    4570           0 :   MarkRootSetInParallel(&root_visitor);
    4571             : 
    4572             :   // Mark rest on the main thread.
    4573             :   {
    4574           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
    4575           0 :     ProcessMarkingWorklist();
    4576             :   }
    4577             : 
    4578             :   {
    4579           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
    4580             :     isolate()->global_handles()->MarkYoungWeakUnmodifiedObjectsPending(
    4581           0 :         &IsUnmarkedObjectForYoungGeneration);
    4582             :     isolate()->global_handles()->IterateYoungWeakUnmodifiedRootsForFinalizers(
    4583           0 :         &root_visitor);
    4584             :     isolate()
    4585             :         ->global_handles()
    4586             :         ->IterateYoungWeakUnmodifiedRootsForPhantomHandles(
    4587           0 :             &root_visitor, &IsUnmarkedObjectForYoungGeneration);
    4588           0 :     ProcessMarkingWorklist();
    4589             :   }
    4590           0 : }
    4591             : 
    4592           0 : void MinorMarkCompactCollector::ProcessMarkingWorklist() {
    4593             :   MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
    4594           0 :   HeapObject object;
    4595           0 :   while (marking_worklist.Pop(&object)) {
    4596             :     DCHECK(!object->IsFiller());
    4597             :     DCHECK(object->IsHeapObject());
    4598             :     DCHECK(heap()->Contains(object));
    4599             :     DCHECK(non_atomic_marking_state()->IsGrey(object));
    4600             :     main_marking_visitor()->Visit(object);
    4601             :   }
    4602             :   DCHECK(marking_worklist.IsLocalEmpty());
    4603           0 : }
    4604             : 
    4605           0 : void MinorMarkCompactCollector::Evacuate() {
    4606           0 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
    4607             :   base::MutexGuard guard(heap()->relocation_mutex());
    4608             : 
    4609             :   {
    4610           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
    4611           0 :     EvacuatePrologue();
    4612             :   }
    4613             : 
    4614             :   {
    4615           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
    4616           0 :     EvacuatePagesInParallel();
    4617             :   }
    4618             : 
    4619           0 :   UpdatePointersAfterEvacuation();
    4620             : 
    4621             :   {
    4622           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
    4623           0 :     if (!heap()->new_space()->Rebalance()) {
    4624           0 :       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
    4625             :     }
    4626             :   }
    4627             : 
    4628             :   {
    4629           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
    4630           0 :     for (Page* p : new_space_evacuation_pages_) {
    4631           0 :       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
    4632             :           p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
    4633             :         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
    4634             :         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
    4635             :         p->SetFlag(Page::SWEEP_TO_ITERATE);
    4636           0 :         sweep_to_iterate_pages_.push_back(p);
    4637             :       }
    4638             :     }
    4639             :     new_space_evacuation_pages_.clear();
    4640             :   }
    4641             : 
    4642             :   {
    4643           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
    4644           0 :     EvacuateEpilogue();
    4645             :   }
    4646           0 : }
    4647             : 
    4648             : namespace {
    4649             : 
    4650           0 : class YoungGenerationEvacuator : public Evacuator {
    4651             :  public:
    4652             :   YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
    4653             :                            RecordMigratedSlotVisitor* record_visitor)
    4654           0 :       : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
    4655             : 
    4656           0 :   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
    4657           0 :     return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
    4658             :   }
    4659             : 
    4660             :  protected:
    4661             :   void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
    4662             : 
    4663             :   MinorMarkCompactCollector* collector_;
    4664             : };
    4665             : 
    4666           0 : void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
    4667             :                                                intptr_t* live_bytes) {
    4668           0 :   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
    4669             :                "YoungGenerationEvacuator::RawEvacuatePage");
    4670             :   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
    4671           0 :       collector_->non_atomic_marking_state();
    4672           0 :   *live_bytes = marking_state->live_bytes(chunk);
    4673           0 :   switch (ComputeEvacuationMode(chunk)) {
    4674             :     case kObjectsNewToOld:
    4675           0 :       LiveObjectVisitor::VisitGreyObjectsNoFail(
    4676             :           chunk, marking_state, &new_space_visitor_,
    4677           0 :           LiveObjectVisitor::kClearMarkbits);
    4678             :       // ArrayBufferTracker will be updated during the pointer-updating phase.
    4679           0 :       break;
    4680             :     case kPageNewToOld:
    4681           0 :       LiveObjectVisitor::VisitGreyObjectsNoFail(
    4682             :           chunk, marking_state, &new_to_old_page_visitor_,
    4683           0 :           LiveObjectVisitor::kKeepMarking);
    4684             :       new_to_old_page_visitor_.account_moved_bytes(
    4685             :           marking_state->live_bytes(chunk));
    4686           0 :       if (!chunk->IsLargePage()) {
    4687             :         // TODO(mlippautz): If cleaning array buffers is too slow here we can
    4688             :         // delay it until the next GC.
    4689           0 :         ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
    4690             :         if (heap()->ShouldZapGarbage()) {
    4691             :           collector_->MakeIterable(static_cast<Page*>(chunk),
    4692             :                                    MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
    4693           0 :         } else if (heap()->incremental_marking()->IsMarking()) {
    4694             :           // When incremental marking is on, we need to clear the mark bits of
    4695             :           // the full collector. We cannot yet discard the young generation mark
    4696             :           // bits as they are still needed during the pointer-updating phase.
    4697           0 :           collector_->MakeIterable(static_cast<Page*>(chunk),
    4698             :                                    MarkingTreatmentMode::KEEP,
    4699           0 :                                    IGNORE_FREE_SPACE);
    4700             :         }
    4701             :       }
    4702             :       break;
    4703             :     case kPageNewToNew:
    4704           0 :       LiveObjectVisitor::VisitGreyObjectsNoFail(
    4705             :           chunk, marking_state, &new_to_new_page_visitor_,
    4706           0 :           LiveObjectVisitor::kKeepMarking);
    4707             :       new_to_new_page_visitor_.account_moved_bytes(
    4708             :           marking_state->live_bytes(chunk));
    4709             :       DCHECK(!chunk->IsLargePage());
    4710             :       // TODO(mlippautz): If cleaning array buffers is too slow here we can
    4711             :       // delay it until the next GC.
    4712           0 :       ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
    4713             :       if (heap()->ShouldZapGarbage()) {
    4714             :         collector_->MakeIterable(static_cast<Page*>(chunk),
    4715             :                                  MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
    4716           0 :       } else if (heap()->incremental_marking()->IsMarking()) {
    4717             :         // When incremental marking is on, we need to clear the mark bits of
    4718             :         // the full collector. We cannot yet discard the young generation mark
    4719             :       // bits as they are still needed during the pointer-updating phase.
    4720           0 :         collector_->MakeIterable(static_cast<Page*>(chunk),
    4721           0 :                                  MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
    4722             :       }
    4723             :       break;
    4724             :     case kObjectsOldToOld:
    4725           0 :       UNREACHABLE();
    4726             :       break;
    4727             :   }
    4728           0 : }
    4729             : 
    4730             : }  // namespace
    4731             : 
    4732           0 : void MinorMarkCompactCollector::EvacuatePagesInParallel() {
    4733             :   ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
    4734           0 :                                  &page_parallel_job_semaphore_);
    4735             :   intptr_t live_bytes = 0;
    4736             : 
    4737           0 :   for (Page* page : new_space_evacuation_pages_) {
    4738             :     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
    4739           0 :     if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
    4740           0 :     live_bytes += live_bytes_on_page;
    4741           0 :     if (ShouldMovePage(page, live_bytes_on_page)) {
    4742           0 :       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
    4743           0 :         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
    4744             :       } else {
    4745           0 :         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
    4746             :       }
    4747             :     }
    4748           0 :     evacuation_job.AddItem(new EvacuationItem(page));
    4749             :   }
    4750             : 
    4751             :   // Promote young generation large objects.
    4752           0 :   for (auto it = heap()->new_lo_space()->begin();
    4753             :        it != heap()->new_lo_space()->end();) {
    4754             :     LargePage* current = *it;
    4755             :     it++;
    4756             :     HeapObject object = current->GetObject();
    4757             :     DCHECK(!non_atomic_marking_state_.IsBlack(object));
    4758           0 :     if (non_atomic_marking_state_.IsGrey(object)) {
    4759           0 :       heap_->lo_space()->PromoteNewLargeObject(current);
    4760             :       current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
    4761           0 :       evacuation_job.AddItem(new EvacuationItem(current));
    4762             :     }
    4763             :   }
    4764           0 :   if (evacuation_job.NumberOfItems() == 0) return;
    4765             : 
    4766             :   YoungGenerationMigrationObserver observer(heap(),
    4767             :                                             heap()->mark_compact_collector());
    4768             :   YoungGenerationRecordMigratedSlotVisitor record_visitor(
    4769             :       heap()->mark_compact_collector());
    4770           0 :   CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
    4771           0 :       this, &evacuation_job, &record_visitor, &observer, live_bytes);
    4772             : }
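
A note grounded in the loop above: a page that ShouldMovePage approves is moved wholesale, promoted to old space when it lies below the age mark (it has already survived a scavenge) and kept within new space otherwise; in every case an EvacuationItem is queued, so the page's live objects are still processed by the parallel evacuators.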
    4773             : 
    4774           0 : int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
    4775             :     ItemParallelJob* job) {
    4776             :   int pages = 0;
    4777           0 :   for (Page* p : new_space_evacuation_pages_) {
    4778           0 :     if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
    4779           0 :       if (p->local_tracker() == nullptr) continue;
    4780             : 
    4781           0 :       pages++;
    4782           0 :       job->AddItem(new ArrayBufferTrackerUpdatingItem(
    4783             :           p, ArrayBufferTrackerUpdatingItem::kRegular));
    4784             :     }
    4785             :   }
    4786           0 :   return pages;
    4787             : }
    4788             : 
    4789             : #endif  // ENABLE_MINOR_MC
    4790             : 
    4791             : }  // namespace internal
    4792      120216 : }  // namespace v8

Generated by: LCOV version 1.10