LCOV - code coverage report
Current view: top level - src/heap - mark-compact.cc (source / functions)
Test: app.info
Date: 2017-04-26

                  Hit    Total    Coverage
Lines:           1217     1448      84.0 %
Functions:        156      236      66.1 %

          Line data    Source code
       1             : // Copyright 2012 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/mark-compact.h"
       6             : 
       7             : #include "src/base/atomicops.h"
       8             : #include "src/base/bits.h"
       9             : #include "src/base/sys-info.h"
      10             : #include "src/code-stubs.h"
      11             : #include "src/compilation-cache.h"
      12             : #include "src/deoptimizer.h"
      13             : #include "src/execution.h"
      14             : #include "src/frames-inl.h"
      15             : #include "src/gdb-jit.h"
      16             : #include "src/global-handles.h"
      17             : #include "src/heap/array-buffer-tracker.h"
      18             : #include "src/heap/concurrent-marking.h"
      19             : #include "src/heap/gc-tracer.h"
      20             : #include "src/heap/incremental-marking.h"
      21             : #include "src/heap/mark-compact-inl.h"
      22             : #include "src/heap/object-stats.h"
      23             : #include "src/heap/objects-visiting-inl.h"
      24             : #include "src/heap/objects-visiting.h"
      25             : #include "src/heap/page-parallel-job.h"
      26             : #include "src/heap/spaces-inl.h"
      27             : #include "src/ic/ic.h"
      28             : #include "src/ic/stub-cache.h"
      29             : #include "src/tracing/tracing-category-observer.h"
      30             : #include "src/utils-inl.h"
      31             : #include "src/v8.h"
      32             : #include "src/v8threads.h"
      33             : 
      34             : namespace v8 {
      35             : namespace internal {
      36             : 
      37             : 
      38             : const char* Marking::kWhiteBitPattern = "00";
      39             : const char* Marking::kBlackBitPattern = "11";
      40             : const char* Marking::kGreyBitPattern = "10";
      41             : const char* Marking::kImpossibleBitPattern = "01";
      42             : 
       43             : // The following must hold so that {ObjectMarking::MarkBitFrom} cannot produce
       44             : // an invalid {kImpossibleBitPattern} in the marking bitmap through overlapping.
      45             : STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
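
The four bit patterns above describe two consecutive bits in the marking
bitmap: the first bit says whether an object is marked at all, the second
upgrades grey to black. A minimal standalone sketch of the decoding, assuming
this bit order (the helper below is illustrative, not V8's Bitmap/MarkBit API):

    #include <cassert>
    #include <cstdint>

    enum class Color { kWhite, kGrey, kBlack };

    // Decode the color stored in two consecutive bits of a bitmap cell:
    // "00" = white, "10" = grey, "11" = black, "01" must never occur.
    inline Color ColorFromBits(uint32_t cell, int bit_index) {
      const bool mark = (cell >> bit_index) & 1;         // first bit
      const bool black = (cell >> (bit_index + 1)) & 1;  // second bit
      assert(mark || !black);  // rules out kImpossibleBitPattern "01"
      if (!mark) return Color::kWhite;
      return black ? Color::kBlack : Color::kGrey;
    }

Because Heap::kMinObjectSizeInWords >= 2, the second bit of a pair always lies
within the same object's words, so neighbouring objects cannot combine into the
"01" pattern; that is exactly what the STATIC_ASSERT above guards.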
      46             : 
      47             : // =============================================================================
      48             : // Verifiers
      49             : // =============================================================================
      50             : 
      51             : #ifdef VERIFY_HEAP
      52             : namespace {
      53             : 
      54             : class MarkingVerifier : public ObjectVisitor, public RootVisitor {
      55             :  public:
      56             :   virtual void Run() = 0;
      57             : 
      58             :  protected:
      59             :   explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
      60             : 
      61             :   virtual MarkingState marking_state(MemoryChunk* chunk) = 0;
      62             : 
      63             :   virtual void VerifyPointers(Object** start, Object** end) = 0;
      64             : 
      65             :   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
      66             :     VerifyPointers(start, end);
      67             :   }
      68             : 
      69             :   void VisitRootPointers(Root root, Object** start, Object** end) override {
      70             :     VerifyPointers(start, end);
      71             :   }
      72             : 
      73             :   void VerifyRoots(VisitMode mode);
      74             :   void VerifyMarkingOnPage(const Page& page, const MarkingState& state,
      75             :                            Address start, Address end);
      76             :   void VerifyMarking(NewSpace* new_space);
      77             :   void VerifyMarking(PagedSpace* paged_space);
      78             : 
      79             :   Heap* heap_;
      80             : };
      81             : 
      82             : void MarkingVerifier::VerifyRoots(VisitMode mode) {
      83             :   heap_->IterateStrongRoots(this, mode);
      84             : }
      85             : 
      86             : void MarkingVerifier::VerifyMarkingOnPage(const Page& page,
      87             :                                           const MarkingState& state,
      88             :                                           Address start, Address end) {
      89             :   HeapObject* object;
      90             :   Address next_object_must_be_here_or_later = start;
      91             :   for (Address current = start; current < end;) {
      92             :     object = HeapObject::FromAddress(current);
      93             :     // One-word fillers at the end of a black area can be grey.
      94             :     if (ObjectMarking::IsBlackOrGrey(object, state) &&
      95             :         object->map() != heap_->one_pointer_filler_map()) {
      96             :       CHECK(ObjectMarking::IsBlack(object, state));
      97             :       CHECK(current >= next_object_must_be_here_or_later);
      98             :       object->Iterate(this);
      99             :       next_object_must_be_here_or_later = current + object->Size();
      100             :       // The object is either part of a black area created by black
      101             :       // allocation or a regular black object.
     102             :       CHECK(
     103             :           state.bitmap()->AllBitsSetInRange(
     104             :               page.AddressToMarkbitIndex(current),
     105             :               page.AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
     106             :           state.bitmap()->AllBitsClearInRange(
     107             :               page.AddressToMarkbitIndex(current + kPointerSize * 2),
     108             :               page.AddressToMarkbitIndex(next_object_must_be_here_or_later)));
     109             :       current = next_object_must_be_here_or_later;
     110             :     } else {
     111             :       current += kPointerSize;
     112             :     }
     113             :   }
     114             : }
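
One worked reading of the CHECK above, as an illustration: take a black object
of four words starting at current. Either the object lies inside a
black-allocation area, in which case all four of its mark bits are set, or it
is a regular black object whose own "11" pattern occupies the first two bits;
then every bit from current + 2 * kPointerSize up to the object's end must be
clear. That is why the AllBitsClearInRange check starts two words past
current; any other bit pattern in the range fails the verifier.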
     115             : 
     116             : void MarkingVerifier::VerifyMarking(NewSpace* space) {
     117             :   Address end = space->top();
      118             :   // The bottom position is at the start of its page. This allows us to use
      119             :   // page->area_start() as the start of the range on all pages.
     120             :   CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
     121             : 
     122             :   PageRange range(space->bottom(), end);
     123             :   for (auto it = range.begin(); it != range.end();) {
     124             :     Page* page = *(it++);
     125             :     Address limit = it != range.end() ? page->area_end() : end;
     126             :     CHECK(limit == end || !page->Contains(end));
     127             :     VerifyMarkingOnPage(*page, marking_state(page), page->area_start(), limit);
     128             :   }
     129             : }
     130             : 
     131             : void MarkingVerifier::VerifyMarking(PagedSpace* space) {
     132             :   for (Page* p : *space) {
     133             :     VerifyMarkingOnPage(*p, marking_state(p), p->area_start(), p->area_end());
     134             :   }
     135             : }
     136             : 
     137             : class FullMarkingVerifier : public MarkingVerifier {
     138             :  public:
     139             :   explicit FullMarkingVerifier(Heap* heap) : MarkingVerifier(heap) {}
     140             : 
     141             :   void Run() override {
     142             :     VerifyRoots(VISIT_ONLY_STRONG);
     143             :     VerifyMarking(heap_->new_space());
     144             :     VerifyMarking(heap_->old_space());
     145             :     VerifyMarking(heap_->code_space());
     146             :     VerifyMarking(heap_->map_space());
     147             : 
     148             :     LargeObjectIterator it(heap_->lo_space());
     149             :     for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     150             :       if (ObjectMarking::IsBlackOrGrey(obj, marking_state(obj))) {
     151             :         obj->Iterate(this);
     152             :       }
     153             :     }
     154             :   }
     155             : 
     156             :  protected:
     157             :   MarkingState marking_state(MemoryChunk* chunk) override {
     158             :     return MarkingState::Internal(chunk);
     159             :   }
     160             : 
     161             :   MarkingState marking_state(HeapObject* object) {
     162             :     return MarkingState::Internal(object);
     163             :   }
     164             : 
     165             :   void VerifyPointers(Object** start, Object** end) override {
     166             :     for (Object** current = start; current < end; current++) {
     167             :       if ((*current)->IsHeapObject()) {
     168             :         HeapObject* object = HeapObject::cast(*current);
     169             :         CHECK(ObjectMarking::IsBlackOrGrey(object, marking_state(object)));
     170             :       }
     171             :     }
     172             :   }
     173             : 
     174             :   void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
     175             :     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     176             :     if (!host->IsWeakObject(rinfo->target_object())) {
     177             :       Object* p = rinfo->target_object();
     178             :       VisitPointer(host, &p);
     179             :     }
     180             :   }
     181             : 
     182             :   void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
     183             :     DCHECK(rinfo->rmode() == RelocInfo::CELL);
     184             :     if (!host->IsWeakObject(rinfo->target_cell())) {
     185             :       ObjectVisitor::VisitCellPointer(host, rinfo);
     186             :     }
     187             :   }
     188             : };
     189             : 
     190             : class YoungGenerationMarkingVerifier : public MarkingVerifier {
     191             :  public:
     192             :   explicit YoungGenerationMarkingVerifier(Heap* heap) : MarkingVerifier(heap) {}
     193             : 
     194             :   MarkingState marking_state(MemoryChunk* chunk) override {
     195             :     return MarkingState::External(chunk);
     196             :   }
     197             : 
     198             :   MarkingState marking_state(HeapObject* object) {
     199             :     return MarkingState::External(object);
     200             :   }
     201             : 
     202             :   void Run() override {
     203             :     VerifyRoots(VISIT_ALL_IN_SCAVENGE);
     204             :     VerifyMarking(heap_->new_space());
     205             :   }
     206             : 
     207             :   void VerifyPointers(Object** start, Object** end) override {
     208             :     for (Object** current = start; current < end; current++) {
     209             :       if ((*current)->IsHeapObject()) {
     210             :         HeapObject* object = HeapObject::cast(*current);
     211             :         if (!heap_->InNewSpace(object)) return;
     212             :         CHECK(ObjectMarking::IsBlackOrGrey(object, marking_state(object)));
     213             :       }
     214             :     }
     215             :   }
     216             : };
     217             : 
     218             : class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
     219             :  public:
     220             :   virtual void Run() = 0;
     221             : 
     222             :   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
     223             :     VerifyPointers(start, end);
     224             :   }
     225             : 
     226             :   void VisitRootPointers(Root root, Object** start, Object** end) override {
     227             :     VerifyPointers(start, end);
     228             :   }
     229             : 
     230             :  protected:
     231             :   explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
     232             : 
     233             :   virtual void VerifyPointers(Object** start, Object** end) = 0;
     234             : 
     235             :   void VerifyRoots(VisitMode mode);
     236             :   void VerifyEvacuationOnPage(Address start, Address end);
     237             :   void VerifyEvacuation(NewSpace* new_space);
     238             :   void VerifyEvacuation(PagedSpace* paged_space);
     239             : 
     240             :   Heap* heap_;
     241             : };
     242             : 
     243             : void EvacuationVerifier::VerifyRoots(VisitMode mode) {
     244             :   heap_->IterateStrongRoots(this, mode);
     245             : }
     246             : 
     247             : void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
     248             :   Address current = start;
     249             :   while (current < end) {
     250             :     HeapObject* object = HeapObject::FromAddress(current);
     251             :     if (!object->IsFiller()) object->Iterate(this);
     252             :     current += object->Size();
     253             :   }
     254             : }
     255             : 
     256             : void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
     257             :   PageRange range(space->bottom(), space->top());
     258             :   for (auto it = range.begin(); it != range.end();) {
     259             :     Page* page = *(it++);
     260             :     Address current = page->area_start();
     261             :     Address limit = it != range.end() ? page->area_end() : space->top();
     262             :     CHECK(limit == space->top() || !page->Contains(space->top()));
     263             :     VerifyEvacuationOnPage(current, limit);
     264             :   }
     265             : }
     266             : 
     267             : void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
     268             :   for (Page* p : *space) {
     269             :     if (p->IsEvacuationCandidate()) continue;
     270             :     if (p->Contains(space->top()))
     271             :       heap_->CreateFillerObjectAt(
     272             :           space->top(), static_cast<int>(space->limit() - space->top()),
     273             :           ClearRecordedSlots::kNo);
     274             : 
     275             :     VerifyEvacuationOnPage(p->area_start(), p->area_end());
     276             :   }
     277             : }
     278             : 
     279             : class FullEvacuationVerifier : public EvacuationVerifier {
     280             :  public:
     281             :   explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
     282             : 
     283             :   void Run() override {
     284             :     VerifyRoots(VISIT_ALL);
     285             :     VerifyEvacuation(heap_->new_space());
     286             :     VerifyEvacuation(heap_->old_space());
     287             :     VerifyEvacuation(heap_->code_space());
     288             :     VerifyEvacuation(heap_->map_space());
     289             :   }
     290             : 
     291             :  protected:
     292             :   void VerifyPointers(Object** start, Object** end) override {
     293             :     for (Object** current = start; current < end; current++) {
     294             :       if ((*current)->IsHeapObject()) {
     295             :         HeapObject* object = HeapObject::cast(*current);
     296             :         CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
     297             :       }
     298             :     }
     299             :   }
     300             : };
     301             : 
     302             : }  // namespace
     303             : #endif  // VERIFY_HEAP
     304             : 
     305             : // =============================================================================
     306             : // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
     307             : // =============================================================================
     308             : 
     309       53346 : int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(
     310       53159 :     int pages, intptr_t live_bytes) {
     311       53346 :   if (!FLAG_parallel_compaction) return 1;
     312             :   // Compute the number of needed tasks based on a target compaction time, the
     313             :   // profiled compaction speed and marked live memory.
     314             :   //
     315             :   // The number of parallel compaction tasks is limited by:
     316             :   // - #evacuation pages
     317             :   // - #cores
     318             :   const double kTargetCompactionTimeInMs = .5;
     319             : 
     320             :   double compaction_speed =
     321       53159 :       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
     322             : 
     323             :   const int available_cores = Max(
     324             :       1, static_cast<int>(
     325       53159 :              V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
     326             :   int tasks;
     327       53159 :   if (compaction_speed > 0) {
     328       43175 :     tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
     329       43175 :                                  kTargetCompactionTimeInMs);
     330             :   } else {
     331             :     tasks = pages;
     332             :   }
     333             :   const int tasks_capped_pages = Min(pages, tasks);
     334       53159 :   return Min(available_cores, tasks_capped_pages);
     335             : }
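
Read as a pure function, the heuristic above computes one task plus however
many are needed to move the live bytes within the 0.5 ms target at the
profiled speed, capped by the number of evacuation pages and cores. A sketch
under those same formulas, with illustrative names rather than V8 API:

    #include <algorithm>
    #include <cstdint>

    int NumCompactionTasks(int pages, int64_t live_bytes,
                           double speed_bytes_per_ms, int cores) {
      const double kTargetMs = 0.5;
      // Without a profiled speed, fall back to one task per page.
      int tasks = (speed_bytes_per_ms > 0)
                      ? 1 + static_cast<int>(live_bytes / speed_bytes_per_ms /
                                             kTargetMs)
                      : pages;
      // Never run more tasks than evacuation pages or available cores.
      return std::min(cores, std::min(pages, tasks));
    }

    // E.g. 4 MB live, 1 MB/ms, 4 pages, 8 cores:
    //   tasks = 1 + 4000000 / 1000000 / 0.5 = 9  ->  min(8, min(4, 9)) = 4.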
     336             : 
     337       60782 : MarkCompactCollector::MarkCompactCollector(Heap* heap)
     338             :     : MarkCompactCollectorBase(heap),
     339             :       page_parallel_job_semaphore_(0),
     340             : #ifdef DEBUG
     341             :       state_(IDLE),
     342             : #endif
     343             :       was_marked_incrementally_(false),
     344             :       evacuation_(false),
     345             :       compacting_(false),
     346             :       black_allocation_(false),
     347             :       have_code_to_deoptimize_(false),
     348             :       marking_deque_(heap),
     349             :       code_flusher_(nullptr),
     350      182346 :       sweeper_(heap) {
     351       60782 : }
     352             : 
     353       60782 : void MarkCompactCollector::SetUp() {
     354             :   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
     355             :   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
     356             :   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
     357             :   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
     358       60782 :   marking_deque()->SetUp();
     359             : 
     360       60782 :   if (FLAG_flush_code) {
     361      121564 :     code_flusher_ = new CodeFlusher(isolate());
     362       60782 :     if (FLAG_trace_code_flushing) {
     363           0 :       PrintF("[code-flushing is now on]\n");
     364             :     }
     365             :   }
     366       60782 : }
     367             : 
     368           0 : void MinorMarkCompactCollector::SetUp() { marking_deque()->SetUp(); }
     369             : 
     370       59285 : void MarkCompactCollector::TearDown() {
     371       59285 :   AbortCompaction();
     372       59285 :   marking_deque()->TearDown();
     373       59285 :   delete code_flusher_;
     374       59285 : }
     375             : 
     376           0 : void MinorMarkCompactCollector::TearDown() { marking_deque()->TearDown(); }
     377             : 
     378           0 : void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
     379             :   DCHECK(!p->NeverEvacuate());
     380        9700 :   p->MarkEvacuationCandidate();
     381        9700 :   evacuation_candidates_.Add(p);
     382           0 : }
     383             : 
     384             : 
     385           0 : static void TraceFragmentation(PagedSpace* space) {
     386           0 :   int number_of_pages = space->CountTotalPages();
     387           0 :   intptr_t reserved = (number_of_pages * space->AreaSize());
     388           0 :   intptr_t free = reserved - space->SizeOfObjects();
     389             :   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
     390             :          AllocationSpaceName(space->identity()), number_of_pages,
     391           0 :          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
     392           0 : }
     393             : 
     394       53425 : bool MarkCompactCollector::StartCompaction() {
     395       53425 :   if (!compacting_) {
     396             :     DCHECK(evacuation_candidates_.length() == 0);
     397             : 
     398      106850 :     CollectEvacuationCandidates(heap()->old_space());
     399             : 
     400       53425 :     if (FLAG_compact_code_space) {
     401       53425 :       CollectEvacuationCandidates(heap()->code_space());
     402           0 :     } else if (FLAG_trace_fragmentation) {
     403           0 :       TraceFragmentation(heap()->code_space());
     404             :     }
     405             : 
     406       53425 :     if (FLAG_trace_fragmentation) {
     407           0 :       TraceFragmentation(heap()->map_space());
     408             :     }
     409             : 
     410       53425 :     compacting_ = evacuation_candidates_.length() > 0;
     411             :   }
     412             : 
     413       53425 :   return compacting_;
     414             : }
     415             : 
     416       53346 : void MarkCompactCollector::CollectGarbage() {
     417             :   // Make sure that Prepare() has been called. The individual steps below will
     418             :   // update the state as they proceed.
     419             :   DCHECK(state_ == PREPARE_GC);
     420             : 
     421       53346 :   MarkLiveObjects();
     422             : 
     423             :   DCHECK(heap_->incremental_marking()->IsStopped());
     424             : 
     425       53346 :   ClearNonLiveReferences();
     426             : 
     427       53346 :   RecordObjectStats();
     428             : 
     429             : #ifdef VERIFY_HEAP
     430             :   if (FLAG_verify_heap) {
     431             :     FullMarkingVerifier verifier(heap());
     432             :     verifier.Run();
     433             :   }
     434             : #endif
     435             : 
     436       53346 :   StartSweepSpaces();
     437             : 
     438       53346 :   EvacuateNewSpaceAndCandidates();
     439             : 
     440       53346 :   Finish();
     441       53346 : }
     442             : 
     443             : #ifdef VERIFY_HEAP
     444             : void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
     445             :   for (Page* p : *space) {
     446             :     const MarkingState state = MarkingState::Internal(p);
     447             :     CHECK(state.bitmap()->IsClean());
     448             :     CHECK_EQ(0, state.live_bytes());
     449             :   }
     450             : }
     451             : 
     452             : 
     453             : void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
     454             :   for (Page* p : PageRange(space->bottom(), space->top())) {
     455             :     const MarkingState state = MarkingState::Internal(p);
     456             :     CHECK(state.bitmap()->IsClean());
     457             :     CHECK_EQ(0, state.live_bytes());
     458             :   }
     459             : }
     460             : 
     461             : 
     462             : void MarkCompactCollector::VerifyMarkbitsAreClean() {
     463             :   VerifyMarkbitsAreClean(heap_->old_space());
     464             :   VerifyMarkbitsAreClean(heap_->code_space());
     465             :   VerifyMarkbitsAreClean(heap_->map_space());
     466             :   VerifyMarkbitsAreClean(heap_->new_space());
     467             : 
     468             :   LargeObjectIterator it(heap_->lo_space());
     469             :   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     470             :     CHECK(ObjectMarking::IsWhite(obj, MarkingState::Internal(obj)));
     471             :     CHECK_EQ(0, MarkingState::Internal(obj).live_bytes());
     472             :   }
     473             : }
     474             : 
     475             : void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
     476             :   HeapObjectIterator code_iterator(heap()->code_space());
     477             :   for (HeapObject* obj = code_iterator.Next(); obj != NULL;
     478             :        obj = code_iterator.Next()) {
     479             :     Code* code = Code::cast(obj);
     480             :     if (!code->is_optimized_code()) continue;
     481             :     if (WillBeDeoptimized(code)) continue;
     482             :     code->VerifyEmbeddedObjectsDependency();
     483             :   }
     484             : }
     485             : 
     486             : 
     487             : void MarkCompactCollector::VerifyOmittedMapChecks() {
     488             :   HeapObjectIterator iterator(heap()->map_space());
     489             :   for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
     490             :     Map* map = Map::cast(obj);
     491             :     map->VerifyOmittedMapChecks();
     492             :   }
     493             : }
     494             : #endif  // VERIFY_HEAP
     495             : 
     496             : 
     497        5790 : static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
     498       46784 :   for (Page* p : *space) {
     499             :     MarkingState::Internal(p).ClearLiveness();
     500             :   }
     501        5790 : }
     502             : 
     503             : 
     504        1930 : static void ClearMarkbitsInNewSpace(NewSpace* space) {
     505       10414 :   for (Page* p : *space) {
     506             :     MarkingState::Internal(p).ClearLiveness();
     507             :   }
     508        1930 : }
     509             : 
     510             : 
     511        1930 : void MarkCompactCollector::ClearMarkbits() {
     512        9650 :   ClearMarkbitsInPagedSpace(heap_->code_space());
     513        3860 :   ClearMarkbitsInPagedSpace(heap_->map_space());
     514        3860 :   ClearMarkbitsInPagedSpace(heap_->old_space());
     515        3860 :   ClearMarkbitsInNewSpace(heap_->new_space());
     516        3860 :   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
     517        1930 : }
     518             : 
     519             : class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
     520             :  public:
     521             :   SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
     522             :               base::AtomicNumber<intptr_t>* num_sweeping_tasks,
     523             :               AllocationSpace space_to_start)
     524             :       : sweeper_(sweeper),
     525             :         pending_sweeper_tasks_(pending_sweeper_tasks),
     526             :         num_sweeping_tasks_(num_sweeping_tasks),
     527      159189 :         space_to_start_(space_to_start) {}
     528             : 
     529      318292 :   virtual ~SweeperTask() {}
     530             : 
     531             :  private:
     532             :   // v8::Task overrides.
     533      159020 :   void Run() override {
     534             :     DCHECK_GE(space_to_start_, FIRST_SPACE);
     535             :     DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
     536      159020 :     const int offset = space_to_start_ - FIRST_SPACE;
     537             :     const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
     538      795727 :     for (int i = 0; i < num_spaces; i++) {
     539      636544 :       const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
     540             :       DCHECK_GE(space_id, FIRST_SPACE);
     541             :       DCHECK_LE(space_id, LAST_PAGED_SPACE);
     542      636544 :       sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
     543             :     }
     544      159183 :     num_sweeping_tasks_->Decrement(1);
     545      159185 :     pending_sweeper_tasks_->Signal();
     546      159170 :   }
     547             : 
     548             :   Sweeper* sweeper_;
     549             :   base::Semaphore* pending_sweeper_tasks_;
     550             :   base::AtomicNumber<intptr_t>* num_sweeping_tasks_;
     551             :   AllocationSpace space_to_start_;
     552             : 
     553             :   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
     554             : };
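
The offset arithmetic in Run() is a modular walk over the sweepable spaces, so
concurrently started tasks begin on different spaces instead of contending on
the same one. An illustrative standalone loop (the space names and the count
of four are assumptions about FIRST_SPACE..LAST_PAGED_SPACE, not data from
this report):

    #include <cstdio>

    int main() {
      const char* spaces[] = {"NEW_SPACE", "OLD_SPACE", "CODE_SPACE",
                              "MAP_SPACE"};
      const int num_spaces = 4;
      const int offset = 2;  // a task whose space_to_start_ is CODE_SPACE
      for (int i = 0; i < num_spaces; i++) {
        std::printf("%s\n", spaces[(i + offset) % num_spaces]);
      }
      // Prints CODE_SPACE, MAP_SPACE, NEW_SPACE, OLD_SPACE.
      return 0;
    }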
     555             : 
     556       53346 : void MarkCompactCollector::Sweeper::StartSweeping() {
     557       53346 :   sweeping_in_progress_ = true;
     558      213384 :   ForAllSweepingSpaces([this](AllocationSpace space) {
     559      213384 :     std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
     560             :               [](Page* a, Page* b) {
     561             :                 return MarkingState::Internal(a).live_bytes() <
     562             :                        MarkingState::Internal(b).live_bytes();
     563      426768 :               });
     564      213384 :   });
     565       53346 : }
     566             : 
     567       53346 : void MarkCompactCollector::Sweeper::StartSweeperTasks() {
     568       53346 :   if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
     569      212252 :     ForAllSweepingSpaces([this](AllocationSpace space) {
     570      424504 :       if (space == NEW_SPACE) return;
     571      159189 :       num_sweeping_tasks_.Increment(1);
     572      159189 :       semaphore_counter_++;
     573      159189 :       V8::GetCurrentPlatform()->CallOnBackgroundThread(
     574             :           new SweeperTask(this, &pending_sweeper_tasks_semaphore_,
     575      159189 :                           &num_sweeping_tasks_, space),
     576      318378 :           v8::Platform::kShortRunningTask);
     577             :     });
     578             :   }
     579       53346 : }
     580             : 
     581     5895843 : void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
     582             :     Page* page) {
     583     5895843 :   if (!page->SweepingDone()) {
     584        2219 :     ParallelSweepPage(page, page->owner()->identity());
     585        2219 :     if (!page->SweepingDone()) {
     586             :       // We were not able to sweep that page, i.e., a concurrent
     587             :       // sweeper thread currently owns this page. Wait for the sweeper
     588             :       // thread to be done with this page.
     589             :       page->WaitUntilSweepingCompleted();
     590             :     }
     591             :   }
     592     5895843 : }
     593             : 
     594        3815 : void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
     595        3815 :   if (FLAG_concurrent_sweeping && sweeper().sweeping_in_progress()) {
     596          25 :     sweeper().ParallelSweepSpace(space->identity(), 0);
     597          25 :     space->RefillFreeList();
     598             :   }
     599        3815 : }
     600             : 
     601      729108 : Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
     602      729108 :   base::LockGuard<base::Mutex> guard(&mutex_);
     603      729128 :   SweptList& list = swept_list_[space->identity()];
     604      729128 :   if (list.length() > 0) {
     605      479652 :     return list.RemoveLast();
     606             :   }
     607             :   return nullptr;
     608             : }
     609             : 
     610       53317 : void MarkCompactCollector::Sweeper::EnsureCompleted() {
     611      106634 :   if (!sweeping_in_progress_) return;
     612             : 
     613             :   // If sweeping is not completed or not running at all, we try to complete it
     614             :   // here.
     615             :   ForAllSweepingSpaces(
     616      266585 :       [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
     617             : 
     618       53317 :   if (FLAG_concurrent_sweeping) {
     619      212142 :     while (semaphore_counter_ > 0) {
     620      159102 :       pending_sweeper_tasks_semaphore_.Wait();
     621      159102 :       semaphore_counter_--;
     622             :     }
     623             :   }
     624             : 
     625      213268 :   ForAllSweepingSpaces([this](AllocationSpace space) {
     626      213268 :     if (space == NEW_SPACE) {
     627       53317 :       swept_list_[NEW_SPACE].Clear();
     628             :     }
     629             :     DCHECK(sweeping_list_[space].empty());
     630      213268 :   });
     631       53317 :   sweeping_in_progress_ = false;
     632             : }
     633             : 
     634       69189 : void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
     635      138378 :   if (!sweeping_in_progress_) return;
     636        1503 :   if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
     637       12154 :     for (Page* p : *heap_->new_space()) {
     638        4574 :       SweepOrWaitUntilSweepingCompleted(p);
     639             :     }
     640             :   }
     641             : }
     642             : 
     643      152914 : void MarkCompactCollector::EnsureSweepingCompleted() {
     644      305828 :   if (!sweeper().sweeping_in_progress()) return;
     645             : 
     646       53317 :   sweeper().EnsureCompleted();
     647      159951 :   heap()->old_space()->RefillFreeList();
     648       53317 :   heap()->code_space()->RefillFreeList();
     649       53317 :   heap()->map_space()->RefillFreeList();
     650             : 
     651             : #ifdef VERIFY_HEAP
     652             :   if (FLAG_verify_heap && !evacuation()) {
     653             :     FullEvacuationVerifier verifier(heap_);
     654             :     verifier.Run();
     655             :   }
     656             : #endif
     657             : 
     658      106634 :   if (heap()->memory_allocator()->unmapper()->has_delayed_chunks())
     659       16411 :     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
     660             : }
     661             : 
     662       19711 : bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
     663       19711 :   return num_sweeping_tasks_.Value() != 0;
     664             : }
     665             : 
     666      101966 : void MarkCompactCollector::ComputeEvacuationHeuristics(
     667             :     size_t area_size, int* target_fragmentation_percent,
     668             :     size_t* max_evacuated_bytes) {
      669             :   // For the memory-reducing and optimize-for-memory modes we define both
      670             :   // constants directly.
     671             :   const int kTargetFragmentationPercentForReduceMemory = 20;
     672             :   const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
     673             :   const int kTargetFragmentationPercentForOptimizeMemory = 20;
     674             :   const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
     675             : 
      676             :   // For regular mode (which is latency-critical) we start with less
      677             :   // aggressive defaults and switch to a trace-based approach (using the
      678             :   // profiled compaction speed) as soon as we have enough samples.
     679             :   const int kTargetFragmentationPercent = 70;
     680             :   const size_t kMaxEvacuatedBytes = 4 * MB;
      681             :   // Target time for compacting a single area (= the payload of a page). Used
      682             :   // as soon as enough compaction-speed samples exist.
     683             :   const float kTargetMsPerArea = .5;
     684             : 
     685      276838 :   if (heap()->ShouldReduceMemory()) {
     686       28940 :     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
     687       28940 :     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
     688       73026 :   } else if (heap()->ShouldOptimizeForMemoryUsage()) {
     689             :     *target_fragmentation_percent =
     690         120 :         kTargetFragmentationPercentForOptimizeMemory;
     691         120 :     *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
     692             :   } else {
     693             :     const double estimated_compaction_speed =
     694       72906 :         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
     695       72906 :     if (estimated_compaction_speed != 0) {
     696             :       // Estimate the target fragmentation based on traced compaction speed
     697             :       // and a goal for a single page.
     698             :       const double estimated_ms_per_area =
     699       61438 :           1 + area_size / estimated_compaction_speed;
     700             :       *target_fragmentation_percent = static_cast<int>(
     701       61438 :           100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
     702       61438 :       if (*target_fragmentation_percent <
     703             :           kTargetFragmentationPercentForReduceMemory) {
     704             :         *target_fragmentation_percent =
     705           0 :             kTargetFragmentationPercentForReduceMemory;
     706             :       }
     707             :     } else {
     708       11468 :       *target_fragmentation_percent = kTargetFragmentationPercent;
     709             :     }
     710       72906 :     *max_evacuated_bytes = kMaxEvacuatedBytes;
     711             :   }
     712      101966 : }
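
A worked pass through the trace-based branch above, with illustrative numbers
(not taken from this report): for area_size = 500 KB and a profiled compaction
speed of 1000 KB/ms,

    estimated_ms_per_area        = 1 + 500 / 1000         = 1.5 ms
    target_fragmentation_percent = 100 - 100 * 0.5 / 1.5  = 66 (truncated)
    free_bytes_threshold         = 66 * (500 KB / 100)    = 330 KB

so a page of this space counts as fragmented enough to evacuate once at least
330 KB of its 500 KB area are free; 66 is comfortably above the 20% lower
clamp for the memory-reducing mode.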
     713             : 
     714             : 
     715      213700 : void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     716             :   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
     717             : 
     718      106850 :   int number_of_pages = space->CountTotalPages();
     719      106850 :   size_t area_size = space->AreaSize();
     720             : 
     721             :   // Pairs of (live_bytes_in_page, page).
     722             :   typedef std::pair<size_t, Page*> LiveBytesPagePair;
     723             :   std::vector<LiveBytesPagePair> pages;
     724      106850 :   pages.reserve(number_of_pages);
     725             : 
     726             :   DCHECK(!sweeping_in_progress());
     727             :   Page* owner_of_linear_allocation_area =
     728             :       space->top() == space->limit()
     729             :           ? nullptr
     730      106850 :           : Page::FromAllocationAreaAddress(space->top());
     731      982224 :   for (Page* p : *space) {
     732      384262 :     if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
      733             :     // Invariant: Evacuation candidates are only created when marking is
     734             :     // started. This means that sweeping has finished. Furthermore, at the end
     735             :     // of a GC all evacuation candidates are cleared and their slot buffers are
     736             :     // released.
     737      121363 :     CHECK(!p->IsEvacuationCandidate());
     738      121363 :     CHECK_NULL(p->slot_set<OLD_TO_OLD>());
     739      121363 :     CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
     740      121363 :     CHECK(p->SweepingDone());
     741             :     DCHECK(p->area_size() == area_size);
     742      242726 :     pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
     743             :   }
     744             : 
     745             :   int candidate_count = 0;
     746             :   size_t total_live_bytes = 0;
     747             : 
     748      106850 :   const bool reduce_memory = heap()->ShouldReduceMemory();
     749      106850 :   if (FLAG_manual_evacuation_candidates_selection) {
     750       23932 :     for (size_t i = 0; i < pages.size(); i++) {
     751       10362 :       Page* p = pages[i].second;
     752       10362 :       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
     753        5186 :         candidate_count++;
     754        5186 :         total_live_bytes += pages[i].first;
     755             :         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
     756             :         AddEvacuationCandidate(p);
     757             :       }
     758             :     }
     759      103642 :   } else if (FLAG_stress_compaction) {
     760        5460 :     for (size_t i = 0; i < pages.size(); i++) {
     761        1892 :       Page* p = pages[i].second;
     762        1892 :       if (i % 2 == 0) {
     763        1361 :         candidate_count++;
     764        1361 :         total_live_bytes += pages[i].first;
     765             :         AddEvacuationCandidate(p);
     766             :       }
     767             :     }
     768             :   } else {
     769             :     // The following approach determines the pages that should be evacuated.
     770             :     //
     771             :     // We use two conditions to decide whether a page qualifies as an evacuation
     772             :     // candidate, or not:
      773             :     // * Target fragmentation: How fragmented is a page, i.e., how large is
      774             :     //   the ratio between live bytes and the capacity of the page (= area).
      775             :     // * Evacuation quota: A global quota determining how many bytes should
      776             :     //   be compacted.
     777             :     //
      778             :     // The algorithm sorts all pages by live bytes and then iterates over
      779             :     // them, starting with the page with the most free memory, adding pages to
      780             :     // the set of evacuation candidates as long as both conditions
      781             :     // (fragmentation and quota) hold; a worked example follows this function.
     782             :     size_t max_evacuated_bytes;
     783             :     int target_fragmentation_percent;
     784             :     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
     785      101966 :                                 &max_evacuated_bytes);
     786             : 
     787             :     const size_t free_bytes_threshold =
     788      101966 :         target_fragmentation_percent * (area_size / 100);
     789             : 
     790             :     // Sort pages from the most free to the least free, then select
     791             :     // the first n pages for evacuation such that:
     792             :     // - the total size of evacuated objects does not exceed the specified
     793             :     // limit.
     794             :     // - fragmentation of (n+1)-th page does not exceed the specified limit.
     795             :     std::sort(pages.begin(), pages.end(),
     796             :               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
     797             :                 return a.first < b.first;
     798      101966 :               });
     799      422150 :     for (size_t i = 0; i < pages.size(); i++) {
     800      109109 :       size_t live_bytes = pages[i].first;
     801             :       DCHECK_GE(area_size, live_bytes);
     802      109109 :       size_t free_bytes = area_size - live_bytes;
     803      109109 :       if (FLAG_always_compact ||
     804       35216 :           ((free_bytes >= free_bytes_threshold) &&
     805       35216 :            ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
     806       35278 :         candidate_count++;
     807       35278 :         total_live_bytes += live_bytes;
     808             :       }
     809      109109 :       if (FLAG_trace_fragmentation_verbose) {
     810             :         PrintIsolate(isolate(),
     811             :                      "compaction-selection-page: space=%s free_bytes_page=%zu "
     812             :                      "fragmentation_limit_kb=%" PRIuS
     813             :                      " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
     814             :                      "compaction_limit_kb=%zu\n",
     815             :                      AllocationSpaceName(space->identity()), free_bytes / KB,
     816             :                      free_bytes_threshold / KB, target_fragmentation_percent,
     817           0 :                      total_live_bytes / KB, max_evacuated_bytes / KB);
     818             :       }
     819             :     }
      820             :     // How many pages we will allocate for the evacuated objects
      821             :     // in the worst case: ceil(total_live_bytes / area_size).
     822             :     int estimated_new_pages =
     823      101966 :         static_cast<int>((total_live_bytes + area_size - 1) / area_size);
     824             :     DCHECK_LE(estimated_new_pages, candidate_count);
     825             :     int estimated_released_pages = candidate_count - estimated_new_pages;
     826             :     // Avoid (compact -> expand) cycles.
     827      101966 :     if ((estimated_released_pages == 0) && !FLAG_always_compact) {
     828             :       candidate_count = 0;
     829             :     }
     830      105119 :     for (int i = 0; i < candidate_count; i++) {
     831        6306 :       AddEvacuationCandidate(pages[i].second);
     832             :     }
     833             :   }
     834             : 
     835      106850 :   if (FLAG_trace_fragmentation) {
     836             :     PrintIsolate(isolate(),
     837             :                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
     838             :                  "total_live_bytes=%zu\n",
     839             :                  AllocationSpaceName(space->identity()), reduce_memory,
     840           0 :                  candidate_count, total_live_bytes / KB);
     841             :   }
     842      106850 : }
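
The worked example promised in the selection comment above, continuing the
illustrative numbers from ComputeEvacuationHeuristics (area_size = 500 KB,
free_bytes_threshold = 330 KB, max_evacuated_bytes = 4 MB); the pages arrive
sorted by ascending live bytes:

    page A: 100 KB live -> 400 KB free >= 330 KB, quota fine  -> candidate
    page B: 150 KB live -> 350 KB free >= 330 KB, quota fine  -> candidate
    page C: 250 KB live -> 250 KB free <  330 KB              -> rejected

Live bytes only grow along the sorted order, so every later page is rejected
as well. With total_live_bytes = 250 KB, estimated_new_pages =
ceil(250 / 500) = 1, so one of the two candidate pages should be released and
the selection is kept rather than cancelled.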
     843             : 
     844             : 
     845       59337 : void MarkCompactCollector::AbortCompaction() {
     846       59337 :   if (compacting_) {
     847           4 :     RememberedSet<OLD_TO_OLD>::ClearAll(heap());
     848          43 :     for (Page* p : evacuation_candidates_) {
     849             :       p->ClearEvacuationCandidate();
     850             :     }
     851           4 :     compacting_ = false;
     852             :     evacuation_candidates_.Rewind(0);
     853             :   }
     854             :   DCHECK_EQ(0, evacuation_candidates_.length());
     855       59337 : }
     856             : 
     857             : 
     858       53346 : void MarkCompactCollector::Prepare() {
     859      425747 :   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
     860             : 
     861             : #ifdef DEBUG
     862             :   DCHECK(state_ == IDLE);
     863             :   state_ = PREPARE_GC;
     864             : #endif
     865             : 
     866             :   DCHECK(!FLAG_never_compact || !FLAG_always_compact);
     867             : 
     868             :   // Instead of waiting we could also abort the sweeper threads here.
     869       53346 :   EnsureSweepingCompleted();
     870             : 
     871      106692 :   if (heap()->incremental_marking()->IsSweeping()) {
     872          25 :     heap()->incremental_marking()->Stop();
     873             :   }
     874             : 
     875             :   // If concurrent unmapping tasks are still running, we should wait for
     876             :   // them here.
     877       53346 :   heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
     878             : 
     879       53346 :   heap()->concurrent_marking()->EnsureTaskCompleted();
     880             : 
     881             :   // Clear marking bits if incremental marking is aborted.
     882      106796 :   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
     883          52 :     heap()->incremental_marking()->Stop();
     884          52 :     heap()->incremental_marking()->AbortBlackAllocation();
     885          52 :     ClearMarkbits();
     886          52 :     AbortWeakCollections();
     887          52 :     AbortWeakCells();
     888          52 :     AbortTransitionArrays();
     889          52 :     AbortCompaction();
     890         104 :     heap_->local_embedder_heap_tracer()->AbortTracing();
     891             :     marking_deque()->Clear();
     892          52 :     was_marked_incrementally_ = false;
     893             :   }
     894             : 
     895       53346 :   if (!was_marked_incrementally_) {
     896      208884 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
     897      156663 :     heap_->local_embedder_heap_tracer()->TracePrologue();
     898             :   }
     899             : 
      900             :   // Don't start compaction if we are in the middle of an incremental
      901             :   // marking cycle, since we did not collect any slots.
     902       53346 :   if (!FLAG_never_compact && !was_marked_incrementally_) {
     903       52221 :     StartCompaction();
     904             :   }
     905             : 
     906             :   PagedSpaces spaces(heap());
     907      213384 :   for (PagedSpace* space = spaces.next(); space != NULL;
     908             :        space = spaces.next()) {
     909      160038 :     space->PrepareForMarkCompact();
     910             :   }
     911       53346 :   heap()->account_external_memory_concurrently_freed();
     912             : 
     913             : #ifdef VERIFY_HEAP
     914             :   if (!was_marked_incrementally_ && FLAG_verify_heap) {
     915             :     VerifyMarkbitsAreClean();
     916             :   }
     917             : #endif
     918       53346 : }
     919             : 
     920             : 
     921       53346 : void MarkCompactCollector::Finish() {
     922      320076 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
     923             : 
     924       53346 :   if (!heap()->delay_sweeper_tasks_for_testing_) {
     925       53340 :     sweeper().StartSweeperTasks();
     926             :   }
     927             : 
     928             :   // The hashing of weak_object_to_code_table is no longer valid.
     929       53346 :   heap()->weak_object_to_code_table()->Rehash(
     930       53346 :       heap()->isolate()->factory()->undefined_value());
     931             : 
     932             :   // Clear the marking state of live large objects.
     933      106692 :   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
     934             : 
     935             : #ifdef DEBUG
     936             :   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
     937             :   state_ = IDLE;
     938             : #endif
     939      106692 :   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
     940             : 
     941             :   // The stub caches are not traversed during GC; clear them to force
     942             :   // their lazy re-initialization. This must be done after the
     943             :   // GC, because it relies on the new address of certain old space
     944             :   // objects (empty string, illegal builtin).
     945       53346 :   isolate()->load_stub_cache()->Clear();
     946       53346 :   isolate()->store_stub_cache()->Clear();
     947             : 
     948       53346 :   if (have_code_to_deoptimize_) {
     949             :     // Some code objects were marked for deoptimization during the GC.
     950        1144 :     Deoptimizer::DeoptimizeMarkedCode(isolate());
     951        1144 :     have_code_to_deoptimize_ = false;
     952             :   }
     953             : 
     954      160038 :   heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
     955       53346 : }
     956             : 
     957             : 
     958             : // -------------------------------------------------------------------------
     959             : // Phase 1: tracing and marking live objects.
      960             : //   before: all objects are in their normal state.
      961             : //   after: live objects are marked black ("11") in the marking bitmap.
     962             : 
      963             : // Marking all live objects in the heap as part of mark-sweep or mark-compact
      964             : // collection.  Before marking, all objects are in their normal state.  After
      965             : // marking, live objects carry a black mark in the marking bitmap, indicating
      966             : // that they have been found reachable.
     967             : //
     968             : // The marking algorithm is a (mostly) depth-first (because of possible stack
     969             : // overflow) traversal of the graph of objects reachable from the roots.  It
     970             : // uses an explicit stack of pointers rather than recursion.  The young
     971             : // generation's inactive ('from') space is used as a marking stack.  The
     972             : // objects in the marking stack are the ones that have been reached and marked
     973             : // but their children have not yet been visited.
     974             : //
     975             : // The marking stack can overflow during traversal.  In that case, we set an
     976             : // overflow flag.  When the overflow flag is set, we continue marking objects
     977             : // reachable from the objects on the marking stack, but no longer push them on
     978             : // the marking stack.  Instead, we mark them as both marked and overflowed.
     979             : // When the stack is in the overflowed state, objects marked as overflowed
     980             : // have been reached and marked but their children have not been visited yet.
     981             : // After emptying the marking stack, we clear the overflow flag and traverse
     982             : // the heap looking for objects marked as overflowed, push them on the stack,
     983             : // and continue with marking.  This process repeats until all reachable
     984             : // objects have been marked.
     985             : 
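A self-contained toy of the overflow scheme just described, assuming a plain
vector as the heap and a bounded vector as the marking stack; V8's real
MarkingDeque, heap iteration, and write barriers differ:

    #include <cstddef>
    #include <vector>

    // One heap object: a mark bit, an overflow bit, and outgoing edges.
    struct Node {
      bool marked = false;
      bool overflowed = false;
      std::vector<Node*> children;
    };

    // A fixed-capacity marking stack that records, instead of failing,
    // that it had to drop work.
    class BoundedStack {
     public:
      explicit BoundedStack(std::size_t capacity) : capacity_(capacity) {}
      void Push(Node* n) {
        if (stack_.size() < capacity_) {
          stack_.push_back(n);
        } else {
          n->overflowed = true;  // marked, but children not yet visited
          overflowed_ = true;
        }
      }
      Node* Pop() { Node* n = stack_.back(); stack_.pop_back(); return n; }
      bool empty() const { return stack_.empty(); }
      bool overflowed() const { return overflowed_; }
      void clear_overflowed() { overflowed_ = false; }

     private:
      std::size_t capacity_;
      bool overflowed_ = false;
      std::vector<Node*> stack_;
    };

    void Mark(std::vector<Node>& heap, BoundedStack& stack, Node* root) {
      root->marked = true;
      stack.Push(root);
      for (;;) {
        // Drain the stack; pushing a child may set the overflow flag
        // instead of growing the stack.
        while (!stack.empty()) {
          Node* n = stack.Pop();
          for (Node* child : n->children) {
            if (!child->marked) {
              child->marked = true;
              stack.Push(child);
            }
          }
        }
        if (!stack.overflowed()) break;
        // Clear the flag, rescan the heap for overflowed objects, push
        // them back, and repeat until a pass completes without overflow.
        stack.clear_overflowed();
        for (Node& n : heap) {
          if (n.overflowed) {
            n.overflowed = false;
            stack.Push(&n);
          }
        }
      }
    }
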
     986       53346 : void CodeFlusher::ProcessJSFunctionCandidates() {
     987       53346 :   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
     988             :   Code* interpreter_entry_trampoline =
     989             :       isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
     990       53346 :   Object* undefined = isolate_->heap()->undefined_value();
     991             : 
     992       53346 :   JSFunction* candidate = jsfunction_candidates_head_;
     993             :   JSFunction* next_candidate;
     994      110391 :   while (candidate != NULL) {
     995             :     next_candidate = GetNextCandidate(candidate);
     996             :     ClearNextCandidate(candidate, undefined);
     997             : 
     998             :     SharedFunctionInfo* shared = candidate->shared();
     999             : 
    1000             :     Code* code = shared->code();
    1001        3699 :     if (ObjectMarking::IsWhite(code, MarkingState::Internal(code))) {
    1002           0 :       if (FLAG_trace_code_flushing && shared->is_compiled()) {
    1003           0 :         PrintF("[code-flushing clears: ");
    1004           0 :         shared->ShortPrint();
    1005           0 :         PrintF(" - age: %d]\n", code->GetAge());
    1006             :       }
    1007             :       // Always flush the optimized code map if there is one.
    1008           0 :       if (!shared->OptimizedCodeMapIsCleared()) {
    1009           0 :         shared->ClearOptimizedCodeMap();
    1010             :       }
    1011           0 :       if (shared->HasBytecodeArray()) {
    1012           0 :         shared->set_code(interpreter_entry_trampoline);
    1013           0 :         candidate->set_code(interpreter_entry_trampoline);
    1014             :       } else {
    1015           0 :         shared->set_code(lazy_compile);
    1016           0 :         candidate->set_code(lazy_compile);
    1017             :       }
    1018             :     } else {
    1019             :       DCHECK(ObjectMarking::IsBlack(code, MarkingState::Internal(code)));
    1020        3699 :       candidate->set_code(code);
    1021             :     }
    1022             : 
    1023             :     // We are in the middle of a GC cycle, so the write barrier in the code
    1024             :     // setter did not record the slot update; record it manually here.
    1025        3699 :     Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    1026        3699 :     Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    1027             :     isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
    1028        3699 :         candidate, slot, target);
    1029             : 
    1030             :     Object** shared_code_slot =
    1031        3699 :         HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    1032             :     isolate_->heap()->mark_compact_collector()->RecordSlot(
    1033        3699 :         shared, shared_code_slot, *shared_code_slot);
    1034             : 
    1035             :     candidate = next_candidate;
    1036             :   }
    1037             : 
    1038       53346 :   jsfunction_candidates_head_ = NULL;
    1039       53346 : }
    1040             : 
    1041             : 
    1042       53346 : void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
    1043       53346 :   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
    1044             :   Code* interpreter_entry_trampoline =
    1045             :       isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
    1046       53346 :   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
    1047             :   SharedFunctionInfo* next_candidate;
    1048      116585 :   while (candidate != NULL) {
    1049             :     next_candidate = GetNextCandidate(candidate);
    1050             :     ClearNextCandidate(candidate);
    1051             : 
    1052             :     Code* code = candidate->code();
    1053        9893 :     if (ObjectMarking::IsWhite(code, MarkingState::Internal(code))) {
    1054        4762 :       if (FLAG_trace_code_flushing && candidate->is_compiled()) {
    1055           0 :         PrintF("[code-flushing clears: ");
    1056           0 :         candidate->ShortPrint();
    1057           0 :         PrintF(" - age: %d]\n", code->GetAge());
    1058             :       }
    1059             :       // Always flush the optimized code map if there is one.
    1060        4762 :       if (!candidate->OptimizedCodeMapIsCleared()) {
    1061         295 :         candidate->ClearOptimizedCodeMap();
    1062             :       }
    1063        4762 :       if (candidate->HasBytecodeArray()) {
    1064           0 :         candidate->set_code(interpreter_entry_trampoline);
    1065             :       } else {
    1066        4762 :         candidate->set_code(lazy_compile);
    1067             :       }
    1068             :     }
    1069             : 
    1070             :     Object** code_slot =
    1071        9893 :         HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    1072             :     isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
    1073        9893 :                                                            *code_slot);
    1074             : 
    1075             :     candidate = next_candidate;
    1076             :   }
    1077             : 
    1078       53346 :   shared_function_info_candidates_head_ = NULL;
    1079       53346 : }
    1080             : 
    1081             : 
    1082           0 : void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
    1083             :   // Make sure previous flushing decisions are revisited.
    1084           0 :   isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
    1085             : 
    1086           0 :   if (FLAG_trace_code_flushing) {
    1087           0 :     PrintF("[code-flushing abandons function-info: ");
    1088           0 :     shared_info->ShortPrint();
    1089           0 :     PrintF("]\n");
    1090             :   }
    1091             : 
    1092           0 :   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
    1093             :   SharedFunctionInfo* next_candidate;
    1094           0 :   if (candidate == shared_info) {
    1095             :     next_candidate = GetNextCandidate(shared_info);
    1096           0 :     shared_function_info_candidates_head_ = next_candidate;
    1097             :     ClearNextCandidate(shared_info);
    1098             :   } else {
    1099           0 :     while (candidate != NULL) {
    1100             :       next_candidate = GetNextCandidate(candidate);
    1101             : 
    1102           0 :       if (next_candidate == shared_info) {
    1103             :         next_candidate = GetNextCandidate(shared_info);
    1104             :         SetNextCandidate(candidate, next_candidate);
    1105             :         ClearNextCandidate(shared_info);
    1106             :         break;
    1107             :       }
    1108             : 
    1109             :       candidate = next_candidate;
    1110             :     }
    1111             :   }
    1112           0 : }
    1113             : 
    1114             : 
    1115           2 : void CodeFlusher::EvictCandidate(JSFunction* function) {
    1116             :   DCHECK(!function->next_function_link()->IsUndefined(isolate_));
    1117           2 :   Object* undefined = isolate_->heap()->undefined_value();
    1118             : 
    1119             :   // Make sure previous flushing decisions are revisited.
    1120           2 :   isolate_->heap()->incremental_marking()->IterateBlackObject(function);
    1121             :   isolate_->heap()->incremental_marking()->IterateBlackObject(
    1122           2 :       function->shared());
    1123             : 
    1124           2 :   if (FLAG_trace_code_flushing) {
    1125           0 :     PrintF("[code-flushing abandons closure: ");
    1126           0 :     function->shared()->ShortPrint();
    1127           0 :     PrintF("]\n");
    1128             :   }
    1129             : 
    1130           2 :   JSFunction* candidate = jsfunction_candidates_head_;
    1131             :   JSFunction* next_candidate;
    1132           2 :   if (candidate == function) {
    1133             :     next_candidate = GetNextCandidate(function);
    1134           2 :     jsfunction_candidates_head_ = next_candidate;
    1135             :     ClearNextCandidate(function, undefined);
    1136             :   } else {
    1137           0 :     while (candidate != NULL) {
    1138             :       next_candidate = GetNextCandidate(candidate);
    1139             : 
    1140           0 :       if (next_candidate == function) {
    1141             :         next_candidate = GetNextCandidate(function);
    1142             :         SetNextCandidate(candidate, next_candidate);
    1143             :         ClearNextCandidate(function, undefined);
    1144             :         break;
    1145             :       }
    1146             : 
    1147             :       candidate = next_candidate;
    1148             :     }
    1149             :   }
    1150           2 : }
    1151             : 
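// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): the CodeFlusher keeps
// its candidates on an intrusive singly-linked list threaded through a field
// of the candidate objects themselves, so enqueueing during GC needs no
// allocation.  Candidate and next_link_ are hypothetical stand-ins for
// JSFunction/SharedFunctionInfo and their next-candidate slots.
#include <cstdio>

namespace flusher_sketch {

struct Candidate {
  const char* name = "";
  Candidate* next_link_ = nullptr;  // the list lives inside the objects.
};

struct Flusher {
  Candidate* head_ = nullptr;

  void Add(Candidate* c) {  // push at the head, O(1).
    c->next_link_ = head_;
    head_ = c;
  }

  // Mirrors EvictCandidate(): unlink one element, walking the list when it
  // is not the head.
  void Evict(Candidate* c) {
    if (head_ == c) {
      head_ = c->next_link_;
      c->next_link_ = nullptr;
      return;
    }
    for (Candidate* cur = head_; cur != nullptr; cur = cur->next_link_) {
      if (cur->next_link_ == c) {
        cur->next_link_ = c->next_link_;
        c->next_link_ = nullptr;
        return;
      }
    }
  }

  // Mirrors ProcessJSFunctionCandidates(): walk once, clearing each link
  // before processing, then drop the head.
  void ProcessAll() {
    for (Candidate* c = head_; c != nullptr;) {
      Candidate* next = c->next_link_;
      c->next_link_ = nullptr;
      std::printf("processing %s\n", c->name);
      c = next;
    }
    head_ = nullptr;
  }
};

}  // namespace flusher_sketch
// ---------------------------------------------------------------------------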
    1152             : 
    1153             : class StaticYoungGenerationMarkingVisitor
    1154             :     : public StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor> {
    1155             :  public:
    1156             :   static void Initialize(Heap* heap) {
    1157           0 :     StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor>::Initialize();
    1158             :   }
    1159             : 
    1160           0 :   inline static void VisitPointer(Heap* heap, HeapObject* object, Object** p) {
    1161           0 :     Object* target = *p;
    1162           0 :     if (heap->InNewSpace(target)) {
    1163             :       HeapObject* target_object = HeapObject::cast(target);
    1164           0 :       if (MarkRecursively(heap, target_object)) return;
    1165             :       heap->minor_mark_compact_collector()->MarkObject(target_object);
    1166             :     }
    1167             :   }
    1168             : 
    1169             :  protected:
    1170           0 :   inline static bool MarkRecursively(Heap* heap, HeapObject* object) {
    1171             :     StackLimitCheck check(heap->isolate());
    1172           0 :     if (check.HasOverflowed()) return false;
    1173             : 
    1174           0 :     if (ObjectMarking::IsBlackOrGrey<MarkBit::NON_ATOMIC>(
    1175             :             object, MarkingState::External(object)))
    1176             :       return true;
    1177             :     ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
    1178             :         object, MarkingState::External(object));
    1179             :     IterateBody(object->map(), object);
    1180           0 :     return true;
    1181             :   }
    1182             : };
    1183             : 
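// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): MarkRecursively() above
// marks through the graph by real recursion but bails out when the machine
// stack runs low, leaving the object for the marking deque instead.  The
// depth counter is a hypothetical stand-in for V8's StackLimitCheck.
#include <vector>

namespace recursion_sketch {

struct Obj {
  bool marked = false;
  std::vector<Obj*> children;
};

constexpr int kMaxDepth = 512;  // stand-in for the real stack-limit check.

// Fallback worklist; entries are drained iteratively later (not shown).
std::vector<Obj*> marking_deque;

// Returns false when recursing is unsafe, in which case the caller queues
// the object on the deque (as VisitPointer() above does via MarkObject()).
bool MarkRecursively(Obj* o, int depth = 0) {
  if (depth >= kMaxDepth) return false;
  if (o->marked) return true;
  o->marked = true;
  for (Obj* c : o->children) {
    if (!MarkRecursively(c, depth + 1)) marking_deque.push_back(c);
  }
  return true;
}

}  // namespace recursion_sketch
// ---------------------------------------------------------------------------
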
    1184             : class MarkCompactMarkingVisitor
    1185             :     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
    1186             :  public:
    1187             :   static void Initialize();
    1188             : 
    1189     2186654 :   INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    1190     2186654 :     MarkObjectByPointer(heap->mark_compact_collector(), object, p);
    1191             :   }
    1192             : 
    1193   466622387 :   INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
    1194             :                                    Object** start, Object** end)) {
    1195             :     // Mark all objects pointed to in [start, end).
    1196             :     const int kMinRangeForMarkingRecursion = 64;
    1197   473355608 :     if (end - start >= kMinRangeForMarkingRecursion) {
    1198     6733250 :       if (VisitUnmarkedObjects(heap, object, start, end)) return;
    1199             :       // We are close to a stack overflow, so just mark the objects.
    1200             :     }
    1201   466622387 :     MarkCompactCollector* collector = heap->mark_compact_collector();
    1202  3018929233 :     for (Object** p = start; p < end; p++) {
    1203             :       MarkObjectByPointer(collector, object, p);
    1204             :     }
    1205             :   }
    1206             : 
    1207             :   // Marks the object black and pushes it on the marking stack.
    1208   456009173 :   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    1209   456009173 :     heap->mark_compact_collector()->MarkObject(object);
    1210             :   }
    1211             : 
    1212             :   // Marks the object black without pushing it on the marking stack.
    1213             :   // Returns true if the object needed marking and false otherwise.
    1214             :   INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    1215    60513372 :     if (ObjectMarking::IsWhite(object, MarkingState::Internal(object))) {
    1216    15215312 :       ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
    1217             :       return true;
    1218             :     }
    1219             :     return false;
    1220             :   }
    1221             : 
    1222             :   // Marks the object pointed to by p and records the slot.
    1223             :   INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
    1224             :                                          HeapObject* object, Object** p)) {
    1225  6042283534 :     if (!(*p)->IsHeapObject()) return;
    1226             :     HeapObject* target_object = HeapObject::cast(*p);
    1227             :     collector->RecordSlot(object, p, target_object);
    1228             :     collector->MarkObject(target_object);
    1229             :   }
    1230             : 
    1231             : 
    1232             :   // Visit an unmarked object.
    1233             :   INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
    1234             :                                          HeapObject* obj)) {
    1235             : #ifdef DEBUG
    1236             :     DCHECK(collector->heap()->Contains(obj));
    1237             :     DCHECK(ObjectMarking::IsWhite(obj, MarkingState::Internal(obj)));
    1238             : #endif
    1239   153967804 :     Map* map = obj->map();
    1240   307935503 :     Heap* heap = obj->GetHeap();
    1241   307935422 :     ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj));
    1242             :     // Mark the map pointer and the body.
    1243   153967692 :     heap->mark_compact_collector()->MarkObject(map);
    1244             :     IterateBody(map, obj);
    1245             :   }
    1246             : 
    1247             :   // Visit all unmarked objects pointed to by [start, end).
    1248             :   // Returns false if the operation fails (lack of stack space).
    1249             :   INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
    1250             :                                           Object** start, Object** end)) {
    1251             :     // Return false if we are close to the stack limit.
    1252     6733221 :     StackLimitCheck check(heap->isolate());
    1253     6733221 :     if (check.HasOverflowed()) return false;
    1254             : 
    1255             :     MarkCompactCollector* collector = heap->mark_compact_collector();
    1256             :     // Visit the unmarked objects.
    1257  1925854631 :     for (Object** p = start; p < end; p++) {
    1258  1925854602 :       Object* o = *p;
    1259  1925854602 :       if (!o->IsHeapObject()) continue;
    1260             :       collector->RecordSlot(object, p, o);
    1261             :       HeapObject* obj = HeapObject::cast(o);
    1262  3157346795 :       if (ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)))
    1263             :         continue;
    1264             :       VisitUnmarkedObject(collector, obj);
    1265             :     }
    1266             :     return true;
    1267             :   }
    1268             : 
    1269             :  private:
    1270             :   // Code flushing support.
    1271             : 
    1272             :   static const int kRegExpCodeThreshold = 5;
    1273             : 
    1274      103981 :   static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
    1275             :                                           bool is_one_byte) {
    1276             :     // Make sure that the fixed array is in fact initialized on the RegExp.
    1277             :     // We could potentially trigger a GC when initializing the RegExp.
    1278       97800 :     if (HeapObject::cast(re->data())->map()->instance_type() !=
    1279             :         FIXED_ARRAY_TYPE)
    1280             :       return;
    1281             : 
    1282             :     // Make sure this is a RegExp that actually contains code.
    1283       97800 :     if (re->TypeTag() != JSRegExp::IRREGEXP) return;
    1284             : 
    1285             :     Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
    1286       96438 :     if (!code->IsSmi() &&
    1287             :         HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
    1288             :       // Save a copy that can be reinstated if we need the code again.
    1289             :       re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
    1290             : 
    1291             :       // Saving a copy might create a pointer into a compaction candidate
    1292             :       // that was not observed by the marker.  This might happen if the
    1293             :       // JSRegExp data was marked through the compilation cache before the
    1294             :       // marker reached the JSRegExp object.
    1295             :       FixedArray* data = FixedArray::cast(re->data());
    1296        1280 :       if (ObjectMarking::IsBlackOrGrey(data, MarkingState::Internal(data))) {
    1297             :         Object** slot =
    1298          94 :             data->data_start() + JSRegExp::saved_code_index(is_one_byte);
    1299             :         heap->mark_compact_collector()->RecordSlot(data, slot, code);
    1300             :       }
    1301             : 
    1302             :       // Set a number in the 0-255 range to guarantee no smi overflow.
    1303             :       re->SetDataAt(JSRegExp::code_index(is_one_byte),
    1304        1280 :                     Smi::FromInt(heap->ms_count() & 0xff));
    1305       93878 :     } else if (code->IsSmi()) {
    1306             :       int value = Smi::cast(code)->value();
    1307             :       // The regexp has not been compiled yet or there was a compilation error.
    1308       93878 :       if (value == JSRegExp::kUninitializedValue ||
    1309             :           value == JSRegExp::kCompilationErrorValue) {
    1310             :         return;
    1311             :       }
    1312             : 
    1313             :       // Check if we should flush now.
    1314        4901 :       if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
    1315             :         re->SetDataAt(JSRegExp::code_index(is_one_byte),
    1316             :                       Smi::FromInt(JSRegExp::kUninitializedValue));
    1317             :         re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
    1318             :                       Smi::FromInt(JSRegExp::kUninitializedValue));
    1319             :       }
    1320             :     }
    1321             :   }
    1322             : 
    1323             : 
    1324             :   // Works by storing the current ms_count (as a smi) in the code slot of
    1325             :   // the RegExp's data array, keeping a copy of the code around so it can be
    1326             :   // reinstated if the RegExp is reused before flushing.  If the code has not
    1327             :   // been used for kRegExpCodeThreshold mark-sweep GCs, it is flushed.  (A
    1328             :   // standalone sketch of this scheme follows the visitor class below.)
    1329       48900 :   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    1330       48900 :     Heap* heap = map->GetHeap();
    1331       48900 :     MarkCompactCollector* collector = heap->mark_compact_collector();
    1332       48900 :     if (!collector->is_code_flushing_enabled()) {
    1333             :       JSObjectVisitor::Visit(map, object);
    1334       48900 :       return;
    1335             :     }
    1336             :     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    1337             :     // Flush code or set age on both one byte and two byte code.
    1338       48900 :     UpdateRegExpCodeAgeAndFlush(heap, re, true);
    1339       48900 :     UpdateRegExpCodeAgeAndFlush(heap, re, false);
    1340             :     // Visit the fields of the RegExp, including the updated FixedArray.
    1341             :     JSObjectVisitor::Visit(map, object);
    1342             :   }
    1343             : };
    1344             : 
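// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): the regexp code aging
// scheme used by UpdateRegExpCodeAgeAndFlush() above.  The code slot of the
// regexp's data array holds either compiled code or a small-integer age tag
// (the low byte of the mark-sweep counter); once the tag is
// kRegExpCodeThreshold collections old, the code and its saved copy are
// dropped.  RegExpData and its fields are hypothetical stand-ins.
namespace regexp_sketch {

constexpr int kThreshold = 5;       // mirrors kRegExpCodeThreshold.
constexpr int kUninitialized = -1;  // mirrors JSRegExp::kUninitializedValue.

struct RegExpData {
  bool has_code = false;            // code slot holds compiled code.
  int age_tag = kUninitialized;     // otherwise: age tag (or uninitialized).
  bool has_saved_code = false;      // copy kept for cheap reinstatement.
};

void UpdateAgeAndFlush(RegExpData* re, int ms_count) {
  if (re->has_code) {
    // First GC to see compiled code: save a copy and stamp the age.
    // Masking with 0xff keeps the tag in 0-255, so it stays a small int.
    re->has_saved_code = true;
    re->has_code = false;
    re->age_tag = ms_count & 0xff;
  } else if (re->age_tag != kUninitialized &&
             re->age_tag == ((ms_count - kThreshold) & 0xff)) {
    // Unused for kThreshold collections: flush the code for real.
    re->age_tag = kUninitialized;
    re->has_saved_code = false;
  }
}

}  // namespace regexp_sketch
// ---------------------------------------------------------------------------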
    1345             : 
    1346           0 : void MarkCompactMarkingVisitor::Initialize() {
    1347       58018 :   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
    1348             : 
    1349             :   table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
    1350           0 : }
    1351             : 
    1352             : 
    1353           0 : class CodeMarkingVisitor : public ThreadVisitor {
    1354             :  public:
    1355             :   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
    1356       53346 :       : collector_(collector) {}
    1357             : 
    1358        1026 :   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
    1359        1026 :     collector_->PrepareThreadForCodeFlushing(isolate, top);
    1360        1026 :   }
    1361             : 
    1362             :  private:
    1363             :   MarkCompactCollector* collector_;
    1364             : };
    1365             : 
    1366           0 : class SharedFunctionInfoMarkingVisitor : public ObjectVisitor,
    1367             :                                          public RootVisitor {
    1368             :  public:
    1369             :   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
    1370       53346 :       : collector_(collector) {}
    1371             : 
    1372       18086 :   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    1373       18086 :     for (Object** p = start; p < end; p++) MarkObject(p);
    1374       18086 :   }
    1375             : 
    1376           0 :   void VisitPointer(HeapObject* host, Object** slot) override {
    1377           0 :     MarkObject(slot);
    1378           0 :   }
    1379             : 
    1380      115102 :   void VisitRootPointers(Root root, Object** start, Object** end) override {
    1381      115102 :     for (Object** p = start; p < end; p++) MarkObject(p);
    1382      115102 :   }
    1383             : 
    1384         170 :   void VisitRootPointer(Root root, Object** slot) override { MarkObject(slot); }
    1385             : 
    1386             :  private:
    1387    18153009 :   void MarkObject(Object** slot) {
    1388    18153009 :     Object* obj = *slot;
    1389    18153009 :     if (obj->IsSharedFunctionInfo()) {
    1390             :       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
    1391      481201 :       collector_->MarkObject(shared->code());
    1392      481201 :       collector_->MarkObject(shared);
    1393             :     }
    1394    18153009 :   }
    1395             :   MarkCompactCollector* collector_;
    1396             : };
    1397             : 
    1398             : 
    1399       54372 : void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
    1400             :                                                         ThreadLocalTop* top) {
    1401      816198 :   for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    1402             :     // Note: for a frame that has a pending lazy deoptimization,
    1403             :     // StackFrame::unchecked_code will return a non-optimized code object for
    1404             :     // the outermost function, and StackFrame::LookupCode will return the
    1405             :     // actual optimized code object.
    1406             :     StackFrame* frame = it.frame();
    1407      761826 :     Code* code = frame->unchecked_code();
    1408             :     MarkObject(code);
    1409      761826 :     if (frame->is_optimized()) {
    1410             :       Code* optimized_code = frame->LookupCode();
    1411             :       MarkObject(optimized_code);
    1412             :     }
    1413             :   }
    1414       54372 : }
    1415             : 
    1416             : 
    1417       53346 : void MarkCompactCollector::PrepareForCodeFlushing() {
    1418             :   // If code flushing is disabled, there is no need to prepare for it.
    1419       53346 :   if (!is_code_flushing_enabled()) return;
    1420             : 
    1421             :   // Make sure we are not referencing the code from the stack.
    1422             :   DCHECK(this == heap()->mark_compact_collector());
    1423             :   PrepareThreadForCodeFlushing(heap()->isolate(),
    1424      266730 :                                heap()->isolate()->thread_local_top());
    1425             : 
    1426             :   // Iterate the archived stacks in all threads to check if
    1427             :   // the code is referenced.
    1428             :   CodeMarkingVisitor code_marking_visitor(this);
    1429             :   heap()->isolate()->thread_manager()->IterateArchivedThreads(
    1430       53346 :       &code_marking_visitor);
    1431             : 
    1432             :   SharedFunctionInfoMarkingVisitor visitor(this);
    1433       53346 :   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
    1434       53346 :   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
    1435             : 
    1436       53346 :   ProcessMarkingDeque();
    1437             : }
    1438             : 
    1439           0 : class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
    1440             :  public:
    1441             :   explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
    1442           0 :       : collector_(collector) {}
    1443             : 
    1444           0 :   void VisitRootPointer(Root root, Object** p) override {
    1445           0 :     MarkObjectByPointer(p);
    1446           0 :   }
    1447             : 
    1448           0 :   void VisitRootPointers(Root root, Object** start, Object** end) override {
    1449           0 :     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
    1450           0 :   }
    1451             : 
    1452             :  private:
    1453           0 :   void MarkObjectByPointer(Object** p) {
    1454           0 :     if (!(*p)->IsHeapObject()) return;
    1455             : 
    1456             :     HeapObject* object = HeapObject::cast(*p);
    1457             : 
    1458           0 :     if (!collector_->heap()->InNewSpace(object)) return;
    1459             : 
    1460           0 :     if (ObjectMarking::IsBlackOrGrey<MarkBit::NON_ATOMIC>(
    1461             :             object, MarkingState::External(object)))
    1462             :       return;
    1463             : 
    1464             :     Map* map = object->map();
    1465             :     ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
    1466             :         object, MarkingState::External(object));
    1467             :     StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
    1468             : 
    1469           0 :     collector_->EmptyMarkingDeque();
    1470             :   }
    1471             : 
    1472             :   MinorMarkCompactCollector* collector_;
    1473             : };
    1474             : 
    1475             : // Visitor class for marking heap roots.
    1476             : // TODO(ulan): Remove ObjectVisitor base class after fixing marking of
    1477             : // the string table and the top optimized code.
    1478       53346 : class MarkCompactCollector::RootMarkingVisitor : public ObjectVisitor,
    1479             :                                                  public RootVisitor {
    1480             :  public:
    1481       53346 :   explicit RootMarkingVisitor(Heap* heap)
    1482      106692 :       : collector_(heap->mark_compact_collector()) {}
    1483             : 
    1484       23846 :   void VisitPointer(HeapObject* host, Object** p) override {
    1485       23846 :     MarkObjectByPointer(p);
    1486       23846 :   }
    1487             : 
    1488       54290 :   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    1489       54290 :     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
    1490       54290 :   }
    1491             : 
    1492   134025069 :   void VisitRootPointer(Root root, Object** p) override {
    1493   134025069 :     MarkObjectByPointer(p);
    1494   134025011 :   }
    1495             : 
    1496     1399566 :   void VisitRootPointers(Root root, Object** start, Object** end) override {
    1497     1399566 :     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
    1498     1399565 :   }
    1499             : 
    1500             :   // Skip the weak next code link in a code object, which is visited in
    1501             :   // ProcessTopOptimizedFrame.
    1502         944 :   void VisitNextCodeLink(Code* host, Object** p) override {}
    1503             : 
    1504             :  private:
    1505   204077505 :   void MarkObjectByPointer(Object** p) {
    1506   408155010 :     if (!(*p)->IsHeapObject()) return;
    1507             : 
    1508             :     HeapObject* object = HeapObject::cast(*p);
    1509             : 
    1510   201358089 :     if (ObjectMarking::IsBlackOrGrey<MarkBit::NON_ATOMIC>(
    1511             :             object, MarkingState::Internal(object)))
    1512             :       return;
    1513             : 
    1514             :     Map* map = object->map();
    1515             :     // Mark the object.
    1516             :     ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
    1517             :         object, MarkingState::Internal(object));
    1518             : 
    1519             :     // Mark the map pointer and body, and push them on the marking stack.
    1520    77116968 :     collector_->MarkObject(map);
    1521             :     MarkCompactMarkingVisitor::IterateBody(map, object);
    1522             : 
    1523             :     // Mark all the objects reachable from the map and body.  May leave
    1524             :     // overflowed objects in the heap.
    1525    77117194 :     collector_->EmptyMarkingDeque();
    1526             :   }
    1527             : 
    1528             :   MarkCompactCollector* collector_;
    1529             : };
    1530             : 
    1531       53346 : class InternalizedStringTableCleaner : public ObjectVisitor {
    1532             :  public:
    1533             :   InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
    1534       53346 :       : heap_(heap), pointers_removed_(0), table_(table) {}
    1535             : 
    1536       53346 :   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    1537             :     // Visit all HeapObject pointers in [start, end).
    1538       53346 :     MarkCompactCollector* collector = heap_->mark_compact_collector();
    1539       53346 :     Object* the_hole = heap_->the_hole_value();
    1540   222412898 :     for (Object** p = start; p < end; p++) {
    1541   222359552 :       Object* o = *p;
    1542   222359552 :       if (o->IsHeapObject()) {
    1543             :         HeapObject* heap_object = HeapObject::cast(o);
    1544   222359552 :         if (ObjectMarking::IsWhite(heap_object,
    1545             :                                    MarkingState::Internal(heap_object))) {
    1546     4626856 :           pointers_removed_++;
    1547             :           // Set the entry to the_hole_value (as deleted).
    1548     4626856 :           *p = the_hole;
    1549             :         } else {
    1550             :           // StringTable contains only old space strings.
    1551             :           DCHECK(!heap_->InNewSpace(o));
    1552   217732696 :           collector->RecordSlot(table_, p, o);
    1553             :         }
    1554             :       }
    1555             :     }
    1556       53346 :   }
    1557             : 
    1558             :   int PointersRemoved() {
    1559             :     return pointers_removed_;
    1560             :   }
    1561             : 
    1562             :  private:
    1563             :   Heap* heap_;
    1564             :   int pointers_removed_;
    1565             :   HeapObject* table_;
    1566             : };
    1567             : 
    1568       53346 : class ExternalStringTableCleaner : public RootVisitor {
    1569             :  public:
    1570       53346 :   explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
    1571             : 
    1572       53496 :   void VisitRootPointers(Root root, Object** start, Object** end) override {
    1573             :     // Visit all HeapObject pointers in [start, end).
    1574       53496 :     Object* the_hole = heap_->the_hole_value();
    1575      987419 :     for (Object** p = start; p < end; p++) {
    1576      933923 :       Object* o = *p;
    1577      933923 :       if (o->IsHeapObject()) {
    1578             :         HeapObject* heap_object = HeapObject::cast(o);
    1579      933923 :         if (ObjectMarking::IsWhite(heap_object,
    1580             :                                    MarkingState::Internal(heap_object))) {
    1581        9149 :           if (o->IsExternalString()) {
    1582        9149 :             heap_->FinalizeExternalString(String::cast(*p));
    1583             :           } else {
    1584             :             // The original external string may have been internalized.
    1585             :             DCHECK(o->IsThinString());
    1586             :           }
    1587             :           // Set the entry to the_hole_value (as deleted).
    1588        9149 :           *p = the_hole;
    1589             :         }
    1590             :       }
    1591             :     }
    1592       53496 :   }
    1593             : 
    1594             :  private:
    1595             :   Heap* heap_;
    1596             : };
    1597             : 
    1598             : // Implementation of WeakObjectRetainer for mark-compact GCs.  All marked
    1599             : // objects are retained.
    1600       53346 : class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
    1601             :  public:
    1602     2952669 :   virtual Object* RetainAs(Object* object) {
    1603             :     HeapObject* heap_object = HeapObject::cast(object);
    1604             :     DCHECK(!ObjectMarking::IsGrey(heap_object,
    1605             :                                   MarkingState::Internal(heap_object)));
    1606     2952669 :     if (ObjectMarking::IsBlack(heap_object,
    1607             :                                MarkingState::Internal(heap_object))) {
    1608             :       return object;
    1609     1397203 :     } else if (object->IsAllocationSite() &&
    1610             :                !(AllocationSite::cast(object)->IsZombie())) {
    1611             :       // "dead" AllocationSites need to live long enough for a traversal of new
    1612             :       // space. These sites get a one-time reprieve.
    1613             :       AllocationSite* site = AllocationSite::cast(object);
    1614             :       site->MarkZombie();
    1615             :       ObjectMarking::WhiteToBlack(site, MarkingState::Internal(site));
    1616      328902 :       return object;
    1617             :     } else {
    1618             :       return NULL;
    1619             :     }
    1620             :   }
    1621             : };
    1622             : 
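// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): a WeakObjectRetainer's
// contract is "return the object (or its new location) to keep it on a weak
// list, or null to drop it".  The pruning loop below is a hypothetical
// stand-in for the heap's weak-list processing; the one-time reprieve for
// dead AllocationSites above is omitted.
#include <cstddef>
#include <vector>

namespace retainer_sketch {

struct Object {
  bool is_marked = false;
};

struct WeakObjectRetainer {
  // Mirrors MarkCompactWeakObjectRetainer::RetainAs() for plain objects.
  Object* RetainAs(Object* o) { return o->is_marked ? o : nullptr; }
};

// Keep only the entries the retainer vouches for, compacting in place.
void ProcessWeakList(std::vector<Object*>* list,
                     WeakObjectRetainer* retainer) {
  std::size_t kept = 0;
  for (Object* o : *list) {
    if (Object* retained = retainer->RetainAs(o)) (*list)[kept++] = retained;
  }
  list->resize(kept);
}

}  // namespace retainer_sketch
// ---------------------------------------------------------------------------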
    1623             : 
    1624             : // Fill the marking stack with overflowed objects returned by the given
    1625             : // iterator.  Stop when the marking stack is filled or the end of the space
    1626             : // is reached, whichever comes first.
    1627             : template <class T>
    1628         940 : void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
    1629             :   // The caller should ensure that the marking stack is initially not full,
    1630             :   // so that we don't waste effort pointlessly scanning for objects.
    1631             :   DCHECK(!marking_deque()->IsFull());
    1632             : 
    1633         940 :   Map* filler_map = heap()->one_pointer_filler_map();
    1634         940 :   for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
    1635           0 :     if ((object->map() != filler_map) &&
    1636             :         ObjectMarking::IsGrey(object, MarkingState::Internal(object))) {
    1637             :       ObjectMarking::GreyToBlack(object, MarkingState::Internal(object));
    1638             :       PushBlack(object);
    1639         940 :       if (marking_deque()->IsFull()) return;
    1640             :     }
    1641             :   }
    1642             : }
    1643             : 
    1644       59631 : void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
    1645             :   DCHECK(!marking_deque()->IsFull());
    1646       59631 :   LiveObjectIterator<kGreyObjects> it(p, MarkingState::Internal(p));
    1647             :   HeapObject* object = NULL;
    1648      941516 :   while ((object = it.Next()) != NULL) {
    1649             :     DCHECK(ObjectMarking::IsGrey(object, MarkingState::Internal(object)));
    1650             :     ObjectMarking::GreyToBlack(object, MarkingState::Internal(object));
    1651             :     PushBlack(object);
    1652     1682920 :     if (marking_deque()->IsFull()) return;
    1653             :   }
    1654             : }
    1655             : 
    1656       53346 : class RecordMigratedSlotVisitor : public ObjectVisitor {
    1657             :  public:
    1658             :   explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
    1659       54028 :       : collector_(collector) {}
    1660             : 
    1661           0 :   inline void VisitPointer(HeapObject* host, Object** p) final {
    1662       59331 :     RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
    1663           0 :   }
    1664             : 
    1665       17311 :   inline void VisitPointers(HeapObject* host, Object** start,
    1666             :                             Object** end) final {
    1667   149318293 :     while (start < end) {
    1668   138672168 :       RecordMigratedSlot(host, *start, reinterpret_cast<Address>(start));
    1669   138693643 :       ++start;
    1670             :     }
    1671       17311 :   }
    1672             : 
    1673     1097009 :   inline void VisitCodeEntry(JSFunction* host,
    1674             :                              Address code_entry_slot) override {
    1675     1097009 :     Address code_entry = Memory::Address_at(code_entry_slot);
    1676     1097009 :     if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
    1677             :       RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
    1678             :                                              nullptr, CODE_ENTRY_SLOT,
    1679        3500 :                                              code_entry_slot);
    1680             :     }
    1681     1097009 :   }
    1682             : 
    1683      123297 :   inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
    1684             :     DCHECK_EQ(host, rinfo->host());
    1685             :     DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
    1686      123297 :     Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    1687             :     // The target is always in old space, so we don't have to record the
    1688             :     // slot in the old-to-new remembered set.
    1689             :     DCHECK(!collector_->heap()->InNewSpace(target));
    1690      123297 :     collector_->RecordRelocSlot(host, rinfo, target);
    1691      123297 :   }
    1692             : 
    1693           0 :   inline void VisitDebugTarget(Code* host, RelocInfo* rinfo) override {
    1694             :     DCHECK_EQ(host, rinfo->host());
    1695             :     DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
    1696             :            rinfo->IsPatchedDebugBreakSlotSequence());
    1697           0 :     Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
    1698             :     // The target is always in old space, so we don't have to record the
    1699             :     // slot in the old-to-new remembered set.
    1700             :     DCHECK(!collector_->heap()->InNewSpace(target));
    1701           0 :     collector_->RecordRelocSlot(host, rinfo, target);
    1702           0 :   }
    1703             : 
    1704      119094 :   inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    1705             :     DCHECK_EQ(host, rinfo->host());
    1706             :     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    1707             :     HeapObject* object = HeapObject::cast(rinfo->target_object());
    1708      119094 :     collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
    1709      119094 :     collector_->RecordRelocSlot(host, rinfo, object);
    1710      119093 :   }
    1711             : 
    1712         282 :   inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
    1713             :     DCHECK_EQ(host, rinfo->host());
    1714             :     DCHECK(rinfo->rmode() == RelocInfo::CELL);
    1715             :     Cell* cell = rinfo->target_cell();
    1716             :     // The cell is always in old space, so we don't have to record the slot
    1717             :     // in the old-to-new remembered set.
    1718             :     DCHECK(!collector_->heap()->InNewSpace(cell));
    1719         282 :     collector_->RecordRelocSlot(host, rinfo, cell);
    1720         282 :   }
    1721             : 
    1722             :   // Entries that will never move.
    1723           0 :   inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override {
    1724             :     DCHECK_EQ(host, rinfo->host());
    1725             :     DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
    1726             :     Code* stub = rinfo->code_age_stub();
    1727             :     USE(stub);
    1728             :     DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
    1729           0 :   }
    1730             : 
    1731             :   // Entries that are skipped for recording.
    1732           0 :   inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
    1733           0 :   inline void VisitExternalReference(Foreign* host, Address* p) final {}
    1734           0 :   inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
    1735           0 :   inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
    1736             : 
    1737             :  protected:
    1738   138746951 :   inline virtual void RecordMigratedSlot(HeapObject* host, Object* value,
    1739             :                                          Address slot) {
    1740   138746951 :     if (value->IsHeapObject()) {
    1741             :       Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
    1742   119797190 :       if (p->InNewSpace()) {
    1743     3245742 :         RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
    1744   116551448 :       } else if (p->IsEvacuationCandidate()) {
    1745     4043613 :         RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
    1746             :       }
    1747             :     }
    1748   138749028 :   }
    1749             : 
    1750             :   MarkCompactCollector* collector_;
    1751             : };
    1752             : 
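// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): RecordMigratedSlot()
// above rebuilds remembered-set entries for a just-migrated object.  Which
// set a slot lands in depends only on where the slot's target lives; the
// Page flags and the global sets below are hypothetical stand-ins for V8's
// RememberedSet<OLD_TO_NEW> and RememberedSet<OLD_TO_OLD>.
#include <cstdint>
#include <set>

namespace remembered_set_sketch {

using Address = std::uintptr_t;

struct Page {
  bool in_new_space = false;
  bool is_evacuation_candidate = false;
};

std::set<Address> old_to_new;  // old-space slots pointing into new space.
std::set<Address> old_to_old;  // slots pointing at pages that will move.

void RecordMigratedSlot(const Page* target_page, Address slot) {
  if (target_page->in_new_space) {
    old_to_new.insert(slot);
  } else if (target_page->is_evacuation_candidate) {
    old_to_old.insert(slot);
  }
  // Pointers into non-moving old-space pages need no remembered-set entry.
}

}  // namespace remembered_set_sketch
// ---------------------------------------------------------------------------
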
    1753      237092 : class HeapObjectVisitor {
    1754             :  public:
    1755      237040 :   virtual ~HeapObjectVisitor() {}
    1756             :   virtual bool Visit(HeapObject* object) = 0;
    1757             : };
    1758             : 
    1759      118520 : class EvacuateVisitorBase : public HeapObjectVisitor {
    1760             :  protected:
    1761             :   enum MigrationMode { kFast, kProfiled };
    1762             : 
    1763      118520 :   EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
    1764             :                       RecordMigratedSlotVisitor* record_visitor)
    1765             :       : heap_(heap),
    1766             :         compaction_spaces_(compaction_spaces),
    1767             :         record_visitor_(record_visitor),
    1768             :         profiling_(
    1769      236668 :             heap->isolate()->is_profiling() ||
    1770      236534 :             heap->isolate()->logger()->is_logging_code_events() ||
    1771      355054 :             heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
    1772             : 
    1773    12613306 :   inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
    1774             :                                 HeapObject** target_object) {
    1775             : #ifdef VERIFY_HEAP
    1776             :     if (AbortCompactionForTesting(object)) return false;
    1777             : #endif  // VERIFY_HEAP
    1778    12613306 :     int size = object->Size();
    1779             :     AllocationAlignment alignment = object->RequiredAlignment();
    1780    12607769 :     AllocationResult allocation = target_space->AllocateRaw(size, alignment);
    1781    12632318 :     if (allocation.To(target_object)) {
    1782    12643087 :       MigrateObject(*target_object, object, size, target_space->identity());
    1783    12634532 :       return true;
    1784             :     }
    1785             :     return false;
    1786             :   }
    1787             : 
    1788    27800476 :   inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
    1789             :                             AllocationSpace dest) {
    1790    27800476 :     if (profiling_) {
    1791      280716 :       MigrateObject<kProfiled>(dst, src, size, dest);
    1792             :     } else {
    1793    27519760 :       MigrateObject<kFast>(dst, src, size, dest);
    1794             :     }
    1795    27817524 :   }
    1796             : 
    1797             :   template <MigrationMode mode>
    1798    27892947 :   inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
    1799             :                             AllocationSpace dest) {
    1800    27892947 :     Address dst_addr = dst->address();
    1801    27892947 :     Address src_addr = src->address();
    1802             :     DCHECK(heap_->AllowedToBeMigrated(src, dest));
    1803             :     DCHECK(dest != LO_SPACE);
    1804    27892947 :     if (dest == OLD_SPACE) {
    1805             :       DCHECK_OBJECT_SIZE(size);
    1806             :       DCHECK(IsAligned(size, kPointerSize));
    1807             :       heap_->CopyBlock(dst_addr, src_addr, size);
    1808      201427 :       if ((mode == kProfiled) && dst->IsBytecodeArray()) {
    1809         228 :         PROFILE(heap_->isolate(),
    1810             :                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
    1811             :       }
    1812    12627993 :       dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_);
    1813    15264528 :     } else if (dest == CODE_SPACE) {
    1814             :       DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
    1815             :       if (mode == kProfiled) {
    1816         248 :         PROFILE(heap_->isolate(),
    1817             :                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
    1818             :       }
    1819             :       heap_->CopyBlock(dst_addr, src_addr, size);
    1820       16629 :       Code::cast(dst)->Relocate(dst_addr - src_addr);
    1821             :       RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
    1822       16629 :       dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_);
    1823             :     } else {
    1824             :       DCHECK_OBJECT_SIZE(size);
    1825             :       DCHECK(dest == NEW_SPACE);
    1826             :       heap_->CopyBlock(dst_addr, src_addr, size);
    1827             :     }
    1828             :     if (mode == kProfiled) {
    1829      280853 :       heap_->OnMoveEvent(dst, src, size);
    1830             :     }
    1831             :     base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
    1832             :                           reinterpret_cast<base::AtomicWord>(dst_addr));
    1833    27930111 :   }
    1834             : 
    1835             : #ifdef VERIFY_HEAP
    1836             :   bool AbortCompactionForTesting(HeapObject* object) {
    1837             :     if (FLAG_stress_compaction) {
    1838             :       const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
    1839             :                              Page::kPageAlignmentMask & ~kPointerAlignmentMask;
    1840             :       if ((reinterpret_cast<uintptr_t>(object->address()) &
    1841             :            Page::kPageAlignmentMask) == mask) {
    1842             :         Page* page = Page::FromAddress(object->address());
    1843             :         if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
    1844             :           page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
    1845             :         } else {
    1846             :           page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
    1847             :           return true;
    1848             :         }
    1849             :       }
    1850             :     }
    1851             :     return false;
    1852             :   }
    1853             : #endif  // VERIFY_HEAP
    1854             : 
    1855             :   Heap* heap_;
    1856             :   CompactionSpaceCollection* compaction_spaces_;
    1857             :   RecordMigratedSlotVisitor* record_visitor_;
    1858             :   bool profiling_;
    1859             : };
    1860             : 
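// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): after MigrateObject()
// above copies the body, it overwrites the first word of the old copy with
// the new address (the base::NoBarrier_Store at the end), turning the dead
// object into a forwarding pointer that slot updates can chase later.  The
// fixed-size FakeObject is a hypothetical stand-in; real objects vary in
// size, and the real map-word encoding distinguishes a forwarding address
// from an ordinary map pointer.
#include <cstdint>
#include <cstring>

namespace migration_sketch {

struct FakeObject {
  std::uintptr_t first_word;  // map pointer before; forwarding address after.
  std::uintptr_t payload[3];
};

FakeObject* Migrate(FakeObject* src, FakeObject* dst) {
  std::memcpy(dst, src, sizeof(FakeObject));  // copy the whole body.
  src->first_word = reinterpret_cast<std::uintptr_t>(dst);  // forward.
  return dst;
}

// Valid only for an object known to have been migrated.
FakeObject* Follow(FakeObject* stale) {
  return reinterpret_cast<FakeObject*>(stale->first_word);
}

}  // namespace migration_sketch
// ---------------------------------------------------------------------------
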
    1861       59260 : class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
    1862             :  public:
    1863             :   static const intptr_t kLabSize = 4 * KB;
    1864             :   static const intptr_t kMaxLabObjectSize = 256;
    1865             : 
    1866       59260 :   explicit EvacuateNewSpaceVisitor(Heap* heap,
    1867             :                                    CompactionSpaceCollection* compaction_spaces,
    1868             :                                    RecordMigratedSlotVisitor* record_visitor,
    1869             :                                    base::HashMap* local_pretenuring_feedback)
    1870             :       : EvacuateVisitorBase(heap, compaction_spaces, record_visitor),
    1871             :         buffer_(LocalAllocationBuffer::InvalidBuffer()),
    1872             :         space_to_allocate_(NEW_SPACE),
    1873             :         promoted_size_(0),
    1874             :         semispace_copied_size_(0),
    1875      118520 :         local_pretenuring_feedback_(local_pretenuring_feedback) {}
    1876             : 
    1877    21627768 :   inline bool Visit(HeapObject* object) override {
    1878             :     heap_->UpdateAllocationSite<Heap::kCached>(object,
    1879    43227659 :                                                local_pretenuring_feedback_);
    1880    21672320 :     int size = object->Size();
    1881    21599891 :     HeapObject* target_object = nullptr;
    1882    49571403 :     if (heap_->ShouldBePromoted(object->address(), size) &&
    1883     6369803 :         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
    1884     6369830 :                           &target_object)) {
    1885     6367860 :       promoted_size_ += size;
    1886     6367860 :       return true;
    1887             :     }
    1888    15271148 :     HeapObject* target = nullptr;
    1889    15271148 :     AllocationSpace space = AllocateTargetObject(object, &target);
    1890    15246436 :     MigrateObject(HeapObject::cast(target), object, size, space);
    1891    15295304 :     semispace_copied_size_ += size;
    1892    15295304 :     return true;
    1893             :   }
    1894             : 
    1895             :   intptr_t promoted_size() { return promoted_size_; }
    1896             :   intptr_t semispace_copied_size() { return semispace_copied_size_; }
    1897       59260 :   AllocationInfo CloseLAB() { return buffer_.Close(); }
    1898             : 
    1899             :  private:
    1900             :   enum NewSpaceAllocationMode {
    1901             :     kNonstickyBailoutOldSpace,
    1902             :     kStickyBailoutOldSpace,
    1903             :   };
    1904             : 
    1905    15270577 :   inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
    1906           0 :                                               HeapObject** target_object) {
    1907    15270577 :     const int size = old_object->Size();
    1908             :     AllocationAlignment alignment = old_object->RequiredAlignment();
    1909             :     AllocationResult allocation;
    1910    15199323 :     AllocationSpace space_allocated_in = space_to_allocate_;
    1911    15199323 :     if (space_to_allocate_ == NEW_SPACE) {
    1912    15206067 :       if (size > kMaxLabObjectSize) {
    1913             :         allocation =
    1914      279594 :             AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
    1915             :       } else {
    1916    14926473 :         allocation = AllocateInLab(size, alignment);
    1917             :       }
    1918             :     }
    1919    15227282 :     if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
    1920           0 :       allocation = AllocateInOldSpace(size, alignment);
    1921             :       space_allocated_in = OLD_SPACE;
    1922             :     }
    1923             :     bool ok = allocation.To(target_object);
    1924             :     DCHECK(ok);
    1925             :     USE(ok);
    1926    15227282 :     return space_allocated_in;
    1927             :   }
    1928             : 
    1929      207755 :   inline bool NewLocalAllocationBuffer() {
    1930             :     AllocationResult result =
    1931      207755 :         AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
    1932      207842 :     LocalAllocationBuffer saved_old_buffer = buffer_;
    1933      415663 :     buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
    1934      207824 :     if (buffer_.IsValid()) {
    1935             :       buffer_.TryMerge(&saved_old_buffer);
    1936             :       return true;
    1937             :     }
    1938             :     return false;
    1939             :   }
    1940             : 
    1941      487351 :   inline AllocationResult AllocateInNewSpace(int size_in_bytes,
    1942             :                                              AllocationAlignment alignment,
    1943             :                                              NewSpaceAllocationMode mode) {
    1944             :     AllocationResult allocation =
    1945      487351 :         heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
    1946      487458 :     if (allocation.IsRetry()) {
    1947           0 :       if (!heap_->new_space()->AddFreshPageSynchronized()) {
    1948           0 :         if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
    1949             :       } else {
    1950             :         allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
    1951           0 :                                                                  alignment);
    1952           0 :         if (allocation.IsRetry()) {
    1953           0 :           if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
    1954             :         }
    1955             :       }
    1956             :     }
    1957      487458 :     return allocation;
    1958             :   }
    1959             : 
    1960           0 :   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
    1961             :                                              AllocationAlignment alignment) {
    1962             :     AllocationResult allocation =
    1963           0 :         compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
    1964           0 :                                                         alignment);
    1965           0 :     if (allocation.IsRetry()) {
    1966             :       v8::internal::Heap::FatalProcessOutOfMemory(
    1967           0 :           "MarkCompactCollector: semi-space copy, fallback in old gen", true);
    1968             :     }
    1969           0 :     return allocation;
    1970             :   }
    1971             : 
    1972    14935067 :   inline AllocationResult AllocateInLab(int size_in_bytes,
    1973             :                                         AllocationAlignment alignment) {
    1974             :     AllocationResult allocation;
    1975    14935067 :     if (!buffer_.IsValid()) {
    1976       34744 :       if (!NewLocalAllocationBuffer()) {
    1977           0 :         space_to_allocate_ = OLD_SPACE;
    1978             :         return AllocationResult::Retry(OLD_SPACE);
    1979             :       }
    1980             :     }
    1981    14935078 :     allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
    1982    14952323 :     if (allocation.IsRetry()) {
    1983      173012 :       if (!NewLocalAllocationBuffer()) {
    1984           0 :         space_to_allocate_ = OLD_SPACE;
    1985             :         return AllocationResult::Retry(OLD_SPACE);
    1986             :       } else {
    1987      173070 :         allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
    1988      173062 :         if (allocation.IsRetry()) {
    1989           0 :           space_to_allocate_ = OLD_SPACE;
    1990             :           return AllocationResult::Retry(OLD_SPACE);
    1991             :         }
    1992             :       }
    1993             :     }
    1994    14952373 :     return allocation;
    1995             :   }
    1996             : 
    1997             :   LocalAllocationBuffer buffer_;
    1998             :   AllocationSpace space_to_allocate_;
    1999             :   intptr_t promoted_size_;
    2000             :   intptr_t semispace_copied_size_;
    2001             :   base::HashMap* local_pretenuring_feedback_;
    2002             : };
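                     : 
                     : // [Editor's note] Illustrative sketch, not part of mark-compact.cc. The
                     : // visitor above serves small objects from a thread-local bump-pointer
                     : // buffer (LAB) and falls back to old space only when refilling fails.
                     : // A minimal model of the bump allocation in AllocateInLab(); all names
                     : // below (LabSketch, etc.) are invented for exposition.
                     : #include <cstddef>
                     : #include <cstdint>
                     : 
                     : struct LabSketch {
                     :   uintptr_t top = 0;    // next free byte
                     :   uintptr_t limit = 0;  // one past the last usable byte
                     : 
                     :   // Returns the start of a size-byte region, or 0 when the buffer is
                     :   // exhausted and the caller must fetch a fresh buffer (or bail out).
                     :   uintptr_t Allocate(size_t size) {
                     :     if (limit - top < size) return 0;
                     :     uintptr_t result = top;
                     :     top += size;
                     :     return result;
                     :   }
                     : };
                     : // The retry chain in AllocateInLab() (buffer -> fresh buffer -> old
                     : // space) keeps the common allocation path to a single add-and-compare.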
    2003             : 
    2004             : template <PageEvacuationMode mode>
    2005      118520 : class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
    2006             :  public:
    2007             :   explicit EvacuateNewSpacePageVisitor(
    2008             :       Heap* heap, RecordMigratedSlotVisitor* record_visitor,
    2009             :       base::HashMap* local_pretenuring_feedback)
    2010             :       : heap_(heap),
    2011             :         record_visitor_(record_visitor),
    2012             :         moved_bytes_(0),
    2013      118520 :         local_pretenuring_feedback_(local_pretenuring_feedback) {}
    2014             : 
    2015        1157 :   static void Move(Page* page) {
    2016             :     switch (mode) {
    2017             :       case NEW_TO_NEW:
    2018         489 :         page->heap()->new_space()->MovePageFromSpaceToSpace(page);
    2019             :         page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
    2020             :         break;
    2021             :       case NEW_TO_OLD: {
    2022         668 :         page->Unlink();
    2023         668 :         Page* new_page = Page::ConvertNewToOld(page);
    2024             :         new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
    2025             :         break;
    2026             :       }
    2027             :     }
    2028        1157 :   }
    2029             : 
    2030     6134474 :   inline bool Visit(HeapObject* object) {
    2031     7082582 :     heap_->UpdateAllocationSite<Heap::kCached>(object,
    2032     7082582 :                                                local_pretenuring_feedback_);
    2033             :     if (mode == NEW_TO_OLD) {
    2034     6155679 :       object->IterateBodyFast(record_visitor_);
    2035             :     }
    2036     6153039 :     return true;
    2037             :   }
    2038             : 
    2039             :   intptr_t moved_bytes() { return moved_bytes_; }
    2040        1154 :   void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
    2041             : 
    2042             :  private:
    2043             :   Heap* heap_;
    2044             :   RecordMigratedSlotVisitor* record_visitor_;
    2045             :   intptr_t moved_bytes_;
    2046             :   base::HashMap* local_pretenuring_feedback_;
    2047             : };
    2048             : 
    2049       59260 : class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
    2050             :  public:
    2051             :   EvacuateOldSpaceVisitor(Heap* heap,
    2052             :                           CompactionSpaceCollection* compaction_spaces,
    2053             :                           RecordMigratedSlotVisitor* record_visitor)
    2054       59260 :       : EvacuateVisitorBase(heap, compaction_spaces, record_visitor) {}
    2055             : 
    2056     6259237 :   inline bool Visit(HeapObject* object) override {
    2057             :     CompactionSpace* target_space = compaction_spaces_->Get(
    2058    12518474 :         Page::FromAddress(object->address())->owner()->identity());
    2059     6262596 :     HeapObject* target_object = nullptr;
    2060     6262596 :     if (TryEvacuateObject(target_space, object, &target_object)) {
    2061             :       DCHECK(object->map_word().IsForwardingAddress());
    2062             :       return true;
    2063             :     }
    2064          52 :     return false;
    2065             :   }
    2066             : };
    2067             : 
    2068           0 : class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
    2069             :  public:
    2070          52 :   explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
    2071             : 
    2072           0 :   inline bool Visit(HeapObject* object) {
    2073         682 :     RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
    2074         682 :     object->IterateBody(&visitor);
    2075           0 :     return true;
    2076             :   }
    2077             : 
    2078             :  private:
    2079             :   Heap* heap_;
    2080             : };
    2081             : 
    2082       20155 : void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
    2083      100010 :   for (Page* p : *space) {
    2084       42652 :     DiscoverGreyObjectsOnPage(p);
    2085       98106 :     if (marking_deque()->IsFull()) return;
    2086             :   }
    2087             : }
    2088             : 
    2089             : 
    2090       13744 : void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
    2091       13744 :   NewSpace* space = heap()->new_space();
    2092       75186 :   for (Page* page : PageRange(space->bottom(), space->top())) {
    2093       16979 :     DiscoverGreyObjectsOnPage(page);
    2094       33960 :     if (marking_deque()->IsFull()) return;
    2095             :   }
    2096             : }
    2097             : 
    2098             : 
    2099       76842 : bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
    2100       76842 :   Object* o = *p;
    2101       76842 :   if (!o->IsHeapObject()) return false;
    2102             :   return ObjectMarking::IsWhite(HeapObject::cast(o),
    2103             :                                 MarkingState::Internal(HeapObject::cast(o)));
    2104             : }
    2105             : 
    2106       53346 : void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
    2107       53346 :   StringTable* string_table = heap()->string_table();
    2108             :   // Mark the string table itself.
    2109       53346 :   if (ObjectMarking::IsWhite(string_table,
    2110             :                              MarkingState::Internal(string_table))) {
    2111             :     // String table could have already been marked by visiting the handles list.
    2112             :     ObjectMarking::WhiteToBlack(string_table,
    2113             :                                 MarkingState::Internal(string_table));
    2114             :   }
    2115             :   // Explicitly mark the prefix.
    2116       53346 :   string_table->IteratePrefix(visitor);
    2117       53346 :   ProcessMarkingDeque();
    2118       53346 : }
    2119             : 
    2120       53346 : void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
    2121             :   // Mark the heap roots including global variables, stack variables,
    2122             :   // etc., and all objects reachable from them.
    2123       53346 :   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
    2124             : 
    2125             :   // Handle the string table specially.
    2126       53346 :   MarkStringTable(visitor);
    2127             : 
    2128             :   // There may be overflowed objects in the heap.  Visit them now.
    2129      106692 :   while (marking_deque()->overflowed()) {
    2130           0 :     RefillMarkingDeque();
    2131           0 :     EmptyMarkingDeque();
    2132             :   }
    2133       53346 : }
    2134             : 
    2135             : // Mark all objects reachable from the objects on the marking stack.
    2136             : // Before: the marking stack contains zero or more heap object pointers.
    2137             : // After: the marking stack is empty, and all objects reachable from the
    2138             : // marking stack have been marked, or are overflowed in the heap.
    2139    77456519 : void MarkCompactCollector::EmptyMarkingDeque() {
    2140   576519474 :   while (!marking_deque()->IsEmpty()) {
    2141             :     HeapObject* object = marking_deque()->Pop();
    2142             : 
    2143             :     DCHECK(!object->IsFiller());
    2144             :     DCHECK(object->IsHeapObject());
    2145             :     DCHECK(heap()->Contains(object));
    2146             :     DCHECK(!(ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
    2147             :         object, MarkingState::Internal(object))));
    2148             : 
    2149             :     Map* map = object->map();
    2150             :     MarkObject(map);
    2151             :     MarkCompactMarkingVisitor::IterateBody(map, object);
    2152             :   }
    2153    77456377 : }
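                     : 
                     : // [Editor's note] Illustrative sketch, not V8 code. EmptyMarkingDeque()
                     : // is the classic tri-color worklist drain: pop an object, keep it
                     : // black, and push each still-white child. A toy version over an
                     : // adjacency list; 'marked' stands in for the mark bits, the vector for
                     : // the deque.
                     : #include <vector>
                     : 
                     : void DrainSketch(const std::vector<std::vector<int>>& children,
                     :                  std::vector<bool>& marked, std::vector<int>& worklist) {
                     :   // Precondition: every node already in 'worklist' has marked[node] set.
                     :   while (!worklist.empty()) {
                     :     int node = worklist.back();
                     :     worklist.pop_back();
                     :     for (int child : children[node]) {
                     :       if (!marked[child]) {  // white -> grey
                     :         marked[child] = true;
                     :         worklist.push_back(child);
                     :       }
                     :     }
                     :   }
                     : }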
    2154             : 
    2155             : 
    2156             : // Sweep the heap for overflowed objects, clear their overflow bits, and
    2157             : // push them on the marking stack.  Stop early if the marking stack fills
    2158             : // before sweeping completes.  If sweeping completes, there are no remaining
    2159             : // overflowed objects in the heap so the overflow flag on the marking stack
    2160             : // is cleared.
    2161       13744 : void MarkCompactCollector::RefillMarkingDeque() {
    2162       34839 :   isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
    2163             :   DCHECK(marking_deque()->overflowed());
    2164             : 
    2165       13744 :   DiscoverGreyObjectsInNewSpace();
    2166       40292 :   if (marking_deque()->IsFull()) return;
    2167             : 
    2168       13742 :   DiscoverGreyObjectsInSpace(heap()->old_space());
    2169       27484 :   if (marking_deque()->IsFull()) return;
    2170        3803 :   DiscoverGreyObjectsInSpace(heap()->code_space());
    2171        7606 :   if (marking_deque()->IsFull()) return;
    2172        2610 :   DiscoverGreyObjectsInSpace(heap()->map_space());
    2173        5220 :   if (marking_deque()->IsFull()) return;
    2174         940 :   LargeObjectIterator lo_it(heap()->lo_space());
    2175         940 :   DiscoverGreyObjectsWithIterator(&lo_it);
    2176        1880 :   if (marking_deque()->IsFull()) return;
    2177             : 
    2178             :   marking_deque()->ClearOverflowed();
    2179             : }
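                     : 
                     : // [Editor's note] Illustrative sketch, not V8 code. The marking deque
                     : // is bounded: a push into a full deque only sets the overflow flag, and
                     : // the object is later rediscovered by the grey-object scans above,
                     : // because its mark bits already record it. Hypothetical toy worklist:
                     : #include <cstddef>
                     : 
                     : template <size_t kCapacity>
                     : struct BoundedWorklistSketch {
                     :   void* slots[kCapacity];
                     :   size_t size = 0;
                     :   bool overflowed = false;
                     : 
                     :   void Push(void* object) {
                     :     if (size == kCapacity) {
                     :       overflowed = true;  // dropped here, but still grey in the bitmap
                     :       return;
                     :     }
                     :     slots[size++] = object;
                     :   }
                     : };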
    2180             : 
    2181             : // Mark all objects reachable (transitively) from objects on the marking
    2182             : // stack.  Before: the marking stack contains zero or more heap object
    2183             : // pointers.  After: the marking stack is empty and there are no overflowed
    2184             : // objects in the heap.
    2185      325493 : void MarkCompactCollector::ProcessMarkingDeque() {
    2186      325493 :   EmptyMarkingDeque();
    2187      664730 :   while (marking_deque()->overflowed()) {
    2188       13744 :     RefillMarkingDeque();
    2189       13744 :     EmptyMarkingDeque();
    2190             :   }
    2191             :   DCHECK(marking_deque()->IsEmpty());
    2192      325493 : }
    2193             : 
    2194             : // Mark all objects reachable (transitively) from objects on the marking
    2195             : // stack including references only considered in the atomic marking pause.
    2196      106692 : void MarkCompactCollector::ProcessEphemeralMarking(
    2197             :     bool only_process_harmony_weak_collections) {
    2198             :   DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
    2199             :   bool work_to_do = true;
    2200      320173 :   while (work_to_do) {
    2201      106789 :     if (!only_process_harmony_weak_collections) {
    2202      106789 :       if (heap_->local_embedder_heap_tracer()->InUse()) {
    2203      106692 :         TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
    2204           0 :         heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
    2205             :         heap_->local_embedder_heap_tracer()->Trace(
    2206             :             0,
    2207             :             EmbedderHeapTracer::AdvanceTracingActions(
    2208           0 :                 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
    2209             :       }
    2210             :     } else {
    2211             :       // TODO(mlippautz): We currently do not trace through blink when
    2212             :       // discovering new objects reachable from weak roots (that have been made
    2213             :       // strong). This is a limitation of not having a separate handle type
    2214             :       // that doesn't require zapping before this phase. See crbug.com/668060.
    2215       53346 :       heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
    2216             :     }
    2217      106789 :     ProcessWeakCollections();
    2218      106789 :     work_to_do = !marking_deque()->IsEmpty();
    2219      106789 :     ProcessMarkingDeque();
    2220             :   }
    2221      106692 :   CHECK(marking_deque()->IsEmpty());
    2222      213384 :   CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
    2223      106692 : }
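                     : 
                     : // [Editor's note] Illustrative sketch, not V8 code. Ephemeron
                     : // semantics: a weak-map value is live only if its key is live, and
                     : // marking a value can make further keys live, so the work_to_do loop
                     : // above must iterate to a fixed point. Toy version over (key, value)
                     : // pairs:
                     : #include <set>
                     : #include <utility>
                     : #include <vector>
                     : 
                     : void EphemeronFixpointSketch(
                     :     std::set<int>& live,
                     :     const std::vector<std::pair<int, int>>& entries) {
                     :   bool changed = true;
                     :   while (changed) {
                     :     changed = false;
                     :     for (const auto& kv : entries) {
                     :       // A live key makes its value live; the newly live value may be
                     :       // the key of another entry, hence the outer loop.
                     :       if (live.count(kv.first) != 0 && live.count(kv.second) == 0) {
                     :         live.insert(kv.second);
                     :         changed = true;
                     :       }
                     :     }
                     :   }
                     : }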
    2224             : 
    2225       53346 : void MarkCompactCollector::ProcessTopOptimizedFrame(
    2226             :     RootMarkingVisitor* visitor) {
    2227      609018 :   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
    2228      502326 :        !it.done(); it.Advance()) {
    2229      516105 :     if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
    2230             :       return;
    2231             :     }
    2232      507646 :     if (it.frame()->type() == StackFrame::OPTIMIZED) {
    2233        5320 :       Code* code = it.frame()->LookupCode();
    2234       10640 :       if (!code->CanDeoptAt(it.frame()->pc())) {
    2235         944 :         Code::BodyDescriptor::IterateBody(code, visitor);
    2236             :       }
    2237        5320 :       ProcessMarkingDeque();
    2238        5320 :       return;
    2239             :     }
    2240             :   }
    2241             : }
    2242             : 
    2243       60789 : void MarkingDeque::SetUp() {
    2244       60789 :   backing_store_ = new base::VirtualMemory(kMaxSize);
    2245       60789 :   backing_store_committed_size_ = 0;
    2246       60789 :   if (backing_store_ == nullptr) {
    2247           0 :     V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
    2248             :   }
    2249       60789 : }
    2250             : 
    2251       59292 : void MarkingDeque::TearDown() {
    2252       59292 :   delete backing_store_;
    2253       59292 : }
    2254             : 
    2255       54557 : void MarkingDeque::StartUsing() {
    2256       54557 :   base::LockGuard<base::Mutex> guard(&mutex_);
    2257       54557 :   if (in_use_) {
    2258             :     // This can happen in mark-compact GC if the incremental marker already
    2259             :     // started using the marking deque.
    2260       54557 :     return;
    2261             :   }
    2262       53380 :   in_use_ = true;
    2263       53380 :   EnsureCommitted();
    2264       53380 :   array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
    2265             :   size_t size = FLAG_force_marking_deque_overflows
    2266             :                     ? 64 * kPointerSize
    2267       53380 :                     : backing_store_committed_size_;
    2268             :   DCHECK(
    2269             :       base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
    2270       53380 :   mask_ = static_cast<int>((size / kPointerSize) - 1);
    2271       53380 :   top_ = bottom_ = 0;
    2272       53380 :   overflowed_ = false;
    2273             : }
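                     : 
                     : // [Editor's note] Illustrative sketch, not V8 code. StartUsing()
                     : // asserts that the deque size is a power of two so that
                     : // mask_ = capacity - 1 lets indices wrap with a single AND instead of a
                     : // modulo:
                     : #include <cassert>
                     : #include <cstdint>
                     : 
                     : struct RingIndexSketch {
                     :   uint32_t mask;  // capacity - 1, with capacity a power of two
                     :   explicit RingIndexSketch(uint32_t capacity) : mask(capacity - 1) {
                     :     assert(capacity != 0 && (capacity & (capacity - 1)) == 0);
                     :   }
                     :   uint32_t Next(uint32_t index) const { return (index + 1) & mask; }
                     : };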
    2274             : 
    2275       53353 : void MarkingDeque::StopUsing() {
    2276       53353 :   base::LockGuard<base::Mutex> guard(&mutex_);
    2277      106706 :   if (!in_use_) return;
    2278             :   DCHECK(IsEmpty());
    2279             :   DCHECK(!overflowed_);
    2280       53353 :   top_ = bottom_ = mask_ = 0;
    2281       53353 :   in_use_ = false;
    2282       53353 :   if (FLAG_concurrent_sweeping) {
    2283       53076 :     StartUncommitTask();
    2284             :   } else {
    2285         277 :     Uncommit();
    2286             :   }
    2287             : }
    2288             : 
    2289           0 : void MarkingDeque::Clear() {
    2290             :   DCHECK(in_use_);
    2291          52 :   top_ = bottom_ = 0;
    2292          52 :   overflowed_ = false;
    2293           0 : }
    2294             : 
    2295       53013 : void MarkingDeque::Uncommit() {
    2296             :   DCHECK(!in_use_);
    2297             :   bool success = backing_store_->Uncommit(backing_store_->address(),
    2298       53013 :                                           backing_store_committed_size_);
    2299       53013 :   backing_store_committed_size_ = 0;
    2300       53013 :   CHECK(success);
    2301       53013 : }
    2302             : 
    2303       53380 : void MarkingDeque::EnsureCommitted() {
    2304             :   DCHECK(in_use_);
    2305      106760 :   if (backing_store_committed_size_ > 0) return;
    2306             : 
    2307           0 :   for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
    2308       53106 :     if (backing_store_->Commit(backing_store_->address(), size, false)) {
    2309       53106 :       backing_store_committed_size_ = size;
    2310       53106 :       break;
    2311             :     }
    2312             :   }
    2313       53106 :   if (backing_store_committed_size_ == 0) {
    2314           0 :     V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
    2315             :   }
    2316             : }
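                     : 
                     : // [Editor's note] Illustrative sketch, not V8 code. EnsureCommitted()
                     : // commits with backoff: try the largest size and halve until the OS
                     : // cooperates. 'try_commit' is a hypothetical stand-in for the
                     : // VirtualMemory commit call; min_size must be at least 1.
                     : #include <cstddef>
                     : 
                     : size_t CommitWithBackoffSketch(size_t max_size, size_t min_size,
                     :                                bool (*try_commit)(size_t)) {
                     :   for (size_t size = max_size; size >= min_size; size /= 2) {
                     :     if (try_commit(size)) return size;  // committed this many bytes
                     :   }
                     :   return 0;  // the caller treats 0 as out-of-memory
                     : }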
    2317             : 
    2318       53076 : void MarkingDeque::StartUncommitTask() {
    2319       53076 :   if (!uncommit_task_pending_) {
    2320       53072 :     uncommit_task_pending_ = true;
    2321      106144 :     UncommitTask* task = new UncommitTask(heap_->isolate(), this);
    2322       53072 :     V8::GetCurrentPlatform()->CallOnBackgroundThread(
    2323       53072 :         task, v8::Platform::kShortRunningTask);
    2324             :   }
    2325       53076 : }
    2326             : 
    2327           0 : class ObjectStatsVisitor : public HeapObjectVisitor {
    2328             :  public:
    2329             :   ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
    2330             :                      ObjectStats* dead_stats)
    2331           0 :       : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) {
    2332             :     DCHECK_NOT_NULL(live_stats);
    2333             :     DCHECK_NOT_NULL(dead_stats);
    2334             :     // Global objects are roots and thus recorded as live.
    2335           0 :     live_collector_.CollectGlobalStatistics();
    2336             :   }
    2337             : 
    2338           0 :   bool Visit(HeapObject* obj) override {
    2339           0 :     if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
    2340           0 :       live_collector_.CollectStatistics(obj);
    2341             :     } else {
    2342             :       DCHECK(!ObjectMarking::IsGrey(obj, MarkingState::Internal(obj)));
    2343           0 :       dead_collector_.CollectStatistics(obj);
    2344             :     }
    2345           0 :     return true;
    2346             :   }
    2347             : 
    2348             :  private:
    2349             :   ObjectStatsCollector live_collector_;
    2350             :   ObjectStatsCollector dead_collector_;
    2351             : };
    2352             : 
    2353           0 : void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
    2354           0 :   SpaceIterator space_it(heap());
    2355             :   HeapObject* obj = nullptr;
    2356           0 :   while (space_it.has_next()) {
    2357           0 :     std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
    2358             :     ObjectIterator* obj_it = it.get();
    2359           0 :     while ((obj = obj_it->Next()) != nullptr) {
    2360           0 :       visitor->Visit(obj);
    2361             :     }
    2362           0 :   }
    2363           0 : }
    2364             : 
    2365       53346 : void MarkCompactCollector::RecordObjectStats() {
    2366       53346 :   if (V8_UNLIKELY(FLAG_gc_stats)) {
    2367           0 :     heap()->CreateObjectStats();
    2368             :     ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
    2369           0 :                                heap()->dead_object_stats_);
    2370           0 :     VisitAllObjects(&visitor);
    2371           0 :     if (V8_UNLIKELY(FLAG_gc_stats &
    2372             :                     v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
    2373           0 :       std::stringstream live, dead;
    2374           0 :       heap()->live_object_stats_->Dump(live);
    2375           0 :       heap()->dead_object_stats_->Dump(dead);
    2376           0 :       TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
    2377             :                            "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
    2378             :                            "live", TRACE_STR_COPY(live.str().c_str()), "dead",
    2379           0 :                            TRACE_STR_COPY(dead.str().c_str()));
    2380             :     }
    2381           0 :     if (FLAG_trace_gc_object_stats) {
    2382           0 :       heap()->live_object_stats_->PrintJSON("live");
    2383           0 :       heap()->dead_object_stats_->PrintJSON("dead");
    2384             :     }
    2385           0 :     heap()->live_object_stats_->CheckpointObjectStats();
    2386           0 :     heap()->dead_object_stats_->ClearObjectStats();
    2387             :   }
    2388       53346 : }
    2389             : 
    2390           0 : SlotCallbackResult MinorMarkCompactCollector::CheckAndMarkObject(
    2391             :     Heap* heap, Address slot_address) {
    2392           0 :   Object* object = *reinterpret_cast<Object**>(slot_address);
    2393           0 :   if (heap->InNewSpace(object)) {
    2394             :     // Marking happens before flipping the young generation, so the object
    2395             :     // has to be in ToSpace.
    2396             :     DCHECK(heap->InToSpace(object));
    2397             :     HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
    2398             :     const MarkingState state = MarkingState::External(heap_object);
    2399           0 :     if (ObjectMarking::IsBlackOrGrey<MarkBit::NON_ATOMIC>(heap_object, state)) {
    2400             :       return KEEP_SLOT;
    2401             :     }
    2402             :     ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state);
    2403             :     StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(),
    2404             :                                                      heap_object);
    2405           0 :     return KEEP_SLOT;
    2406             :   }
    2407             :   return REMOVE_SLOT;
    2408             : }
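                     : 
                     : // [Editor's note] Illustrative sketch, not V8 code.
                     : // CheckAndMarkObject() follows the remembered-set callback contract:
                     : // KEEP_SLOT for slots that must be revisited when pointers are updated,
                     : // REMOVE_SLOT otherwise. A slot set iterated with such a callback can
                     : // compact itself in place:
                     : #include <cstddef>
                     : #include <vector>
                     : 
                     : enum SlotResultSketch { kKeepSlot, kRemoveSlot };
                     : 
                     : template <typename Callback>
                     : void IterateSlotsSketch(std::vector<void*>& slots, Callback callback) {
                     :   size_t kept = 0;
                     :   for (void* slot : slots) {
                     :     if (callback(slot) == kKeepSlot) slots[kept++] = slot;
                     :   }
                     :   slots.resize(kept);  // entries answered with kRemoveSlot are dropped
                     : }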
    2409             : 
    2410           0 : static bool IsUnmarkedObject(Heap* heap, Object** p) {
    2411             :   DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
    2412           0 :   return heap->InNewSpace(*p) &&
    2413             :          !ObjectMarking::IsBlack(HeapObject::cast(*p),
    2414           0 :                                  MarkingState::Internal(HeapObject::cast(*p)));
    2415             : }
    2416             : 
    2417           0 : void MinorMarkCompactCollector::MarkLiveObjects() {
    2418           0 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
    2419             : 
    2420             :   PostponeInterruptsScope postpone(isolate());
    2421             : 
    2422             :   StaticYoungGenerationMarkingVisitor::Initialize(heap());
    2423             :   RootMarkingVisitor root_visitor(this);
    2424             : 
    2425           0 :   marking_deque()->StartUsing();
    2426             : 
    2427             :   isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
    2428           0 :       &Heap::IsUnmodifiedHeapObject);
    2429             : 
    2430             :   {
    2431           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
    2432           0 :     heap()->IterateRoots(&root_visitor, VISIT_ALL_IN_SCAVENGE);
    2433           0 :     ProcessMarkingDeque();
    2434             :   }
    2435             : 
    2436             :   {
    2437           0 :     TRACE_GC(heap()->tracer(),
    2438             :              GCTracer::Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS);
    2439             :     RememberedSet<OLD_TO_NEW>::Iterate(
    2440             :         heap(), NON_SYNCHRONIZED,
    2441           0 :         [this](Address addr) { return CheckAndMarkObject(heap(), addr); });
    2442             :     RememberedSet<OLD_TO_NEW>::IterateTyped(
    2443             :         heap(), NON_SYNCHRONIZED,
    2444             :         [this](SlotType type, Address host_addr, Address addr) {
    2445             :           return UpdateTypedSlotHelper::UpdateTypedSlot(
    2446             :               isolate(), type, addr, [this](Object** addr) {
    2447             :                 return CheckAndMarkObject(heap(),
    2448           0 :                                           reinterpret_cast<Address>(addr));
    2449           0 :               });
    2450           0 :         });
    2451           0 :     ProcessMarkingDeque();
    2452             :   }
    2453             : 
    2454             :   {
    2455           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
    2456           0 :     heap()->IterateEncounteredWeakCollections(&root_visitor);
    2457           0 :     ProcessMarkingDeque();
    2458             :   }
    2459             : 
    2460             :   {
    2461           0 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
    2462             :     isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
    2463           0 :         &IsUnmarkedObject);
    2464             :     isolate()
    2465             :         ->global_handles()
    2466             :         ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>(
    2467           0 :             &root_visitor);
    2468           0 :     ProcessMarkingDeque();
    2469             :   }
    2470             : 
    2471           0 :   marking_deque()->StopUsing();
    2472           0 : }
    2473             : 
    2474           0 : void MinorMarkCompactCollector::ProcessMarkingDeque() {
    2475           0 :   EmptyMarkingDeque();
    2476             :   DCHECK(!marking_deque()->overflowed());
    2477             :   DCHECK(marking_deque()->IsEmpty());
    2478           0 : }
    2479             : 
    2480           0 : void MinorMarkCompactCollector::EmptyMarkingDeque() {
    2481           0 :   while (!marking_deque()->IsEmpty()) {
    2482             :     HeapObject* object = marking_deque()->Pop();
    2483             : 
    2484             :     DCHECK(!object->IsFiller());
    2485             :     DCHECK(object->IsHeapObject());
    2486             :     DCHECK(heap()->Contains(object));
    2487             : 
    2488             :     DCHECK(!(ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
    2489             :         object, MarkingState::External(object))));
    2490             : 
    2491             :     Map* map = object->map();
    2492             :     DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
    2493             :         object, MarkingState::External(object))));
    2494             :     StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
    2495             :   }
    2496           0 : }
    2497             : 
    2498           0 : void MinorMarkCompactCollector::CollectGarbage() {
    2499           0 :   MarkLiveObjects();
    2500             : 
    2501             : #ifdef VERIFY_HEAP
    2502             :   if (FLAG_verify_heap) {
    2503             :     YoungGenerationMarkingVerifier verifier(heap());
    2504             :     verifier.Run();
    2505             :   }
    2506             : #endif  // VERIFY_HEAP
    2507           0 : }
    2508             : 
    2509       53346 : void MarkCompactCollector::MarkLiveObjects() {
    2510      853536 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
    2511             :   // The recursive GC marker detects when it is nearing stack overflow,
    2512             :   // and switches to a different marking system.  JS interrupts interfere
    2513             :   // with the C stack limit check.
    2514             :   PostponeInterruptsScope postpone(isolate());
    2515             : 
    2516             :   {
    2517      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
    2518      106692 :     IncrementalMarking* incremental_marking = heap_->incremental_marking();
    2519       53346 :     if (was_marked_incrementally_) {
    2520        1125 :       incremental_marking->Finalize();
    2521             :     } else {
    2522       52221 :       CHECK(incremental_marking->IsStopped());
    2523       53346 :     }
    2524             :   }
    2525             : 
    2526             : #ifdef DEBUG
    2527             :   DCHECK(state_ == PREPARE_GC);
    2528             :   state_ = MARK_LIVE_OBJECTS;
    2529             : #endif
    2530             : 
    2531       53346 :   marking_deque()->StartUsing();
    2532             : 
    2533      106692 :   heap_->local_embedder_heap_tracer()->EnterFinalPause();
    2534             : 
    2535             :   {
    2536      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
    2537      106692 :     PrepareForCodeFlushing();
    2538             :   }
    2539             : 
    2540             :   RootMarkingVisitor root_visitor(heap());
    2541             : 
    2542             :   {
    2543      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
    2544       53346 :     MarkRoots(&root_visitor);
    2545      106692 :     ProcessTopOptimizedFrame(&root_visitor);
    2546             :   }
    2547             : 
    2548             :   {
    2549      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
    2550             : 
    2551             :     // The objects reachable from the roots are marked, yet unreachable
    2552             :     // objects are unmarked.  Mark objects reachable due to host
    2553             :     // application specific logic or through Harmony weak maps.
    2554             :     {
    2555      213384 :       TRACE_GC(heap()->tracer(),
    2556             :                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
    2557      106692 :       ProcessEphemeralMarking(false);
    2558             :     }
    2559             : 
    2560             :     // The objects reachable from the roots, weak maps or object groups
    2561             :     // are marked. Objects pointed to only by weak global handles cannot be
    2562             :     // immediately reclaimed. Instead, we have to mark them as pending and mark
    2563             :     // objects reachable from them.
    2564             :     //
    2565             :     // First we identify nonlive weak handles and mark them as pending
    2566             :     // destruction.
    2567             :     {
    2568      213384 :       TRACE_GC(heap()->tracer(),
    2569             :                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
    2570             :       heap()->isolate()->global_handles()->IdentifyWeakHandles(
    2571       53346 :           &IsUnmarkedHeapObject);
    2572      106692 :       ProcessMarkingDeque();
    2573             :     }
    2574             :     // Then we mark the objects.
    2575             : 
    2576             :     {
    2577      213384 :       TRACE_GC(heap()->tracer(),
    2578             :                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
    2579       53346 :       heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
    2580      106692 :       ProcessMarkingDeque();
    2581             :     }
    2582             : 
    2583             :     // Repeat Harmony weak maps marking to mark unmarked objects reachable from
    2584             :     // the weak roots we just marked as pending destruction.
    2585             :     //
    2586             :     // We only process harmony collections, as all object groups have been fully
    2587             :     // processed and no weakly reachable node can discover new object groups.
    2588             :     {
    2589      213384 :       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
    2590       53346 :       ProcessEphemeralMarking(true);
    2591             :       {
    2592      213384 :         TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
    2593      106692 :         heap()->local_embedder_heap_tracer()->TraceEpilogue();
    2594       53346 :       }
    2595       53346 :     }
    2596       53346 :   }
    2597       53346 : }
    2598             : 
    2599             : 
    2600      106692 : void MarkCompactCollector::ClearNonLiveReferences() {
    2601      640152 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
    2602             : 
    2603             :   {
    2604      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
    2605             : 
    2606             :     // Prune the string table removing all strings only pointed to by the
    2607             :     // string table.  Cannot use string_table() here because the string
    2608             :     // table is marked.
    2609       53346 :     StringTable* string_table = heap()->string_table();
    2610             :     InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
    2611       53346 :     string_table->IterateElements(&internalized_visitor);
    2612       53346 :     string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
    2613             : 
    2614             :     ExternalStringTableCleaner external_visitor(heap());
    2615       53346 :     heap()->external_string_table_.IterateAll(&external_visitor);
    2616      106692 :     heap()->external_string_table_.CleanUpAll();
    2617             :   }
    2618             : 
    2619             :   {
    2620      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
    2621             :     // Process the weak references.
    2622       53346 :     MarkCompactWeakObjectRetainer mark_compact_object_retainer;
    2623      106692 :     heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
    2624             :   }
    2625             : 
    2626             :   // Flush code from collected candidates.
    2627       53346 :   if (is_code_flushing_enabled()) {
    2628      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
    2629      106692 :     code_flusher_->ProcessCandidates();
    2630             :   }
    2631             : 
    2632             : 
    2633             :   DependentCode* dependent_code_list;
    2634             :   Object* non_live_map_list;
    2635       53346 :   ClearWeakCells(&non_live_map_list, &dependent_code_list);
    2636             : 
    2637             :   {
    2638      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
    2639       53346 :     ClearSimpleMapTransitions(non_live_map_list);
    2640      106692 :     ClearFullMapTransitions();
    2641             :   }
    2642             : 
    2643       53346 :   MarkDependentCodeForDeoptimization(dependent_code_list);
    2644             : 
    2645      106692 :   ClearWeakCollections();
    2646       53346 : }
    2647             : 
    2648             : 
    2649       53346 : void MarkCompactCollector::MarkDependentCodeForDeoptimization(
    2650             :     DependentCode* list_head) {
    2651      213384 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
    2652             :   Isolate* isolate = this->isolate();
    2653             :   DependentCode* current = list_head;
    2654      122980 :   while (current->length() > 0) {
    2655             :     have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
    2656       16288 :         isolate, DependentCode::kWeakCodeGroup);
    2657             :     current = current->next_link();
    2658             :   }
    2659             : 
    2660             :   {
    2661      138200 :     ArrayList* list = heap_->weak_new_space_object_to_code_list();
    2662             :     int counter = 0;
    2663     1073730 :     for (int i = 0; i < list->Length(); i += 2) {
    2664             :       WeakCell* obj = WeakCell::cast(list->Get(i));
    2665             :       WeakCell* dep = WeakCell::cast(list->Get(i + 1));
    2666     1785015 :       if (obj->cleared() || dep->cleared()) {
    2667      297205 :         if (!dep->cleared()) {
    2668             :           Code* code = Code::cast(dep->value());
    2669          51 :           if (!code->marked_for_deoptimization()) {
    2670             :             DependentCode::SetMarkedForDeoptimization(
    2671          15 :                 code, DependentCode::DependencyGroup::kWeakCodeGroup);
    2672          15 :             code->InvalidateEmbeddedObjects();
    2673          15 :             have_code_to_deoptimize_ = true;
    2674             :           }
    2675             :         }
    2676             :       } else {
    2677             :         // We record the slot manually because marking is finished at this
    2678             :         // point and the write barrier would bail out.
    2679             :         list->Set(counter, obj, SKIP_WRITE_BARRIER);
    2680             :         RecordSlot(list, list->Slot(counter), obj);
    2681             :         counter++;
    2682             :         list->Set(counter, dep, SKIP_WRITE_BARRIER);
    2683             :         RecordSlot(list, list->Slot(counter), dep);
    2684             :         counter++;
    2685             :       }
    2686             :     }
    2687             :   }
    2688             : 
    2689       53346 :   WeakHashTable* table = heap_->weak_object_to_code_table();
    2690       53346 :   uint32_t capacity = table->Capacity();
    2691     1975714 :   for (uint32_t i = 0; i < capacity; i++) {
    2692     1922368 :     uint32_t key_index = table->EntryToIndex(i);
    2693             :     Object* key = table->get(key_index);
    2694     1922368 :     if (!table->IsKey(isolate, key)) continue;
    2695             :     uint32_t value_index = table->EntryToValueIndex(i);
    2696             :     Object* value = table->get(value_index);
    2697             :     DCHECK(key->IsWeakCell());
    2698      177102 :     if (WeakCell::cast(key)->cleared()) {
    2699             :       have_code_to_deoptimize_ |=
    2700             :           DependentCode::cast(value)->MarkCodeForDeoptimization(
    2701       15754 :               isolate, DependentCode::kWeakCodeGroup);
    2702       31508 :       table->set(key_index, heap_->the_hole_value());
    2703       31508 :       table->set(value_index, heap_->the_hole_value());
    2704       15754 :       table->ElementRemoved();
    2705             :     }
    2706       53346 :   }
    2707       53346 : }
    2708             : 
    2709             : 
    2710       53346 : void MarkCompactCollector::ClearSimpleMapTransitions(
    2711             :     Object* non_live_map_list) {
    2712       53346 :   Object* the_hole_value = heap()->the_hole_value();
    2713             :   Object* weak_cell_obj = non_live_map_list;
    2714      776093 :   while (weak_cell_obj != Smi::kZero) {
    2715             :     WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
    2716             :     Map* map = Map::cast(weak_cell->value());
    2717             :     DCHECK(ObjectMarking::IsWhite(map, MarkingState::Internal(map)));
    2718             :     Object* potential_parent = map->constructor_or_backpointer();
    2719      669401 :     if (potential_parent->IsMap()) {
    2720             :       Map* parent = Map::cast(potential_parent);
    2721      575082 :       if (ObjectMarking::IsBlackOrGrey(parent,
    2722      714961 :                                        MarkingState::Internal(parent)) &&
    2723             :           parent->raw_transitions() == weak_cell) {
    2724      132226 :         ClearSimpleMapTransition(parent, map);
    2725             :       }
    2726             :     }
    2727             :     weak_cell->clear();
    2728             :     weak_cell_obj = weak_cell->next();
    2729             :     weak_cell->clear_next(the_hole_value);
    2730             :   }
    2731       53346 : }
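                     : 
                     : // [Editor's note] Illustrative sketch, not V8 code. The encountered
                     : // weak cells form an intrusive singly linked list threaded through the
                     : // cells themselves; the walk above consumes it, clearing each cell's
                     : // value and next pointer as it advances. Minimal model with invented
                     : // names:
                     : struct CellSketch {
                     :   CellSketch* next;
                     :   void* value;
                     : };
                     : 
                     : void ConsumeListSketch(CellSketch* head) {
                     :   while (head != nullptr) {
                     :     CellSketch* current = head;
                     :     head = current->next;
                     :     current->value = nullptr;  // weak_cell->clear()
                     :     current->next = nullptr;   // weak_cell->clear_next(...)
                     :   }
                     : }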
    2732             : 
    2733             : 
    2734      132226 : void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
    2735             :                                                     Map* dead_transition) {
    2736             :   // A previously existing simple transition (stored in a WeakCell) is going
    2737             :   // to be cleared. Clear the useless cell pointer, and take ownership
    2738             :   // of the descriptor array.
    2739      132226 :   map->set_raw_transitions(Smi::kZero);
    2740             :   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
    2741             :   DescriptorArray* descriptors = map->instance_descriptors();
    2742      132226 :   if (descriptors == dead_transition->instance_descriptors() &&
    2743             :       number_of_own_descriptors > 0) {
    2744       12016 :     TrimDescriptorArray(map, descriptors);
    2745             :     DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
    2746             :     map->set_owns_descriptors(true);
    2747             :   }
    2748      132226 : }
    2749             : 
    2750             : 
    2751       53346 : void MarkCompactCollector::ClearFullMapTransitions() {
    2752      106692 :   HeapObject* undefined = heap()->undefined_value();
    2753       53346 :   Object* obj = heap()->encountered_transition_arrays();
    2754      985089 :   while (obj != Smi::kZero) {
    2755             :     TransitionArray* array = TransitionArray::cast(obj);
    2756             :     int num_transitions = array->number_of_entries();
    2757             :     DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
    2758      878397 :     if (num_transitions > 0) {
    2759             :       Map* map = array->GetTarget(0);
    2760             :       Map* parent = Map::cast(map->constructor_or_backpointer());
    2761             :       bool parent_is_alive =
    2762             :           ObjectMarking::IsBlackOrGrey(parent, MarkingState::Internal(parent));
    2763             :       DescriptorArray* descriptors =
    2764      681220 :           parent_is_alive ? parent->instance_descriptors() : nullptr;
    2765             :       bool descriptors_owner_died =
    2766      681220 :           CompactTransitionArray(parent, array, descriptors);
    2767      681220 :       if (descriptors_owner_died) {
    2768        2953 :         TrimDescriptorArray(parent, descriptors);
    2769             :       }
    2770             :     }
    2771             :     obj = array->next_link();
    2772             :     array->set_next_link(undefined, SKIP_WRITE_BARRIER);
    2773             :   }
    2774             :   heap()->set_encountered_transition_arrays(Smi::kZero);
    2775       53346 : }
    2776             : 
    2777             : 
    2778      681220 : bool MarkCompactCollector::CompactTransitionArray(
    2779             :     Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
    2780             :   int num_transitions = transitions->number_of_entries();
    2781             :   bool descriptors_owner_died = false;
    2782             :   int transition_index = 0;
    2783             :   // Compact all live transitions to the left.
    2784     2201805 :   for (int i = 0; i < num_transitions; ++i) {
    2785             :     Map* target = transitions->GetTarget(i);
    2786             :     DCHECK_EQ(target->constructor_or_backpointer(), map);
    2787     1520585 :     if (ObjectMarking::IsWhite(target, MarkingState::Internal(target))) {
    2788      263854 :       if (descriptors != nullptr &&
    2789             :           target->instance_descriptors() == descriptors) {
    2790             :         descriptors_owner_died = true;
    2791             :       }
    2792             :     } else {
    2793     1388658 :       if (i != transition_index) {
    2794             :         Name* key = transitions->GetKey(i);
    2795             :         transitions->SetKey(transition_index, key);
    2796             :         Object** key_slot = transitions->GetKeySlot(transition_index);
    2797             :         RecordSlot(transitions, key_slot, key);
    2798             :         // Target slots do not need to be recorded since maps are not compacted.
    2799             :         transitions->SetTarget(transition_index, transitions->GetTarget(i));
    2800             :       }
    2801     1388658 :       transition_index++;
    2802             :     }
    2803             :   }
    2804             :   // If there are no transitions to be cleared, return.
    2805      681220 :   if (transition_index == num_transitions) {
    2806             :     DCHECK(!descriptors_owner_died);
    2807             :     return false;
    2808             :   }
    2809             :   // Note that we never eliminate a transition array, though we might right-trim
    2810             :   // such that number_of_transitions() == 0. If this assumption changes,
    2811             :   // TransitionArray::Insert() will need to deal with the case that a transition
    2812             :   // array disappeared during GC.
    2813       49336 :   int trim = TransitionArray::Capacity(transitions) - transition_index;
    2814       49336 :   if (trim > 0) {
    2815             :     heap_->RightTrimFixedArray(transitions,
    2816       49336 :                                trim * TransitionArray::kTransitionSize);
    2817             :     transitions->SetNumberOfTransitions(transition_index);
    2818             :   }
    2819       49336 :   return descriptors_owner_died;
    2820             : }
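                     : 
                     : // [Editor's note] Illustrative sketch, not V8 code. The loop above is a
                     : // stable in-place compaction: live entries are copied left over dead
                     : // ones and the tail is right-trimmed, with no extra allocation. Generic
                     : // form of the transition_index bookkeeping:
                     : #include <cstddef>
                     : #include <vector>
                     : 
                     : template <typename T, typename IsLive>
                     : void CompactSketch(std::vector<T>& entries, IsLive is_live) {
                     :   size_t live_index = 0;
                     :   for (size_t i = 0; i < entries.size(); ++i) {
                     :     if (is_live(entries[i])) entries[live_index++] = entries[i];
                     :   }
                     :   entries.resize(live_index);  // the right-trim step
                     : }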
    2821             : 
    2822             : 
    2823       14969 : void MarkCompactCollector::TrimDescriptorArray(Map* map,
    2824             :                                                DescriptorArray* descriptors) {
    2825             :   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
    2826       14969 :   if (number_of_own_descriptors == 0) {
    2827             :     DCHECK(descriptors == heap_->empty_descriptor_array());
    2828       14969 :     return;
    2829             :   }
    2830             : 
    2831             :   int number_of_descriptors = descriptors->number_of_descriptors_storage();
    2832       14773 :   int to_trim = number_of_descriptors - number_of_own_descriptors;
    2833       14773 :   if (to_trim > 0) {
    2834             :     heap_->RightTrimFixedArray(descriptors,
    2835       14592 :                                to_trim * DescriptorArray::kEntrySize);
    2836             :     descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
    2837             : 
    2838       14592 :     if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
    2839       14592 :     descriptors->Sort();
    2840             : 
    2841             :     if (FLAG_unbox_double_fields) {
    2842             :       LayoutDescriptor* layout_descriptor = map->layout_descriptor();
    2843             :       layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
    2844       14592 :                                                   number_of_own_descriptors);
    2845             :       SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
    2846             :     }
    2847             :   }
    2848             :   DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
    2849             :   map->set_owns_descriptors(true);
    2850             : }
    2851             : 
    2852             : 
    2853       12149 : void MarkCompactCollector::TrimEnumCache(Map* map,
    2854             :                                          DescriptorArray* descriptors) {
    2855             :   int live_enum = map->EnumLength();
    2856       12149 :   if (live_enum == kInvalidEnumCacheSentinel) {
    2857             :     live_enum =
    2858        6883 :         map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
    2859             :   }
    2860       12149 :   if (live_enum == 0) return descriptors->ClearEnumCache();
    2861             : 
    2862       12143 :   FixedArray* enum_cache = descriptors->GetEnumCache();
    2863             : 
    2864       12143 :   int to_trim = enum_cache->length() - live_enum;
    2865       12143 :   if (to_trim <= 0) return;
    2866       12128 :   heap_->RightTrimFixedArray(descriptors->GetEnumCache(), to_trim);
    2867             : 
    2868       12128 :   if (!descriptors->HasEnumIndicesCache()) return;
    2869       12121 :   FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
    2870       12121 :   heap_->RightTrimFixedArray(enum_indices_cache, to_trim);
    2871             : }
    2872             : 
    2873             : 
    2874      106789 : void MarkCompactCollector::ProcessWeakCollections() {
    2875      106789 :   Object* weak_collection_obj = heap()->encountered_weak_collections();
    2876      218105 :   while (weak_collection_obj != Smi::kZero) {
    2877             :     JSWeakCollection* weak_collection =
    2878             :         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    2879             :     DCHECK(ObjectMarking::IsBlackOrGrey(
    2880             :         weak_collection, MarkingState::Internal(weak_collection)));
    2881        4527 :     if (weak_collection->table()->IsHashTable()) {
    2882             :       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
    2883       64470 :       for (int i = 0; i < table->Capacity(); i++) {
    2884             :         HeapObject* heap_object = HeapObject::cast(table->KeyAt(i));
    2885       27708 :         if (ObjectMarking::IsBlackOrGrey(heap_object,
    2886             :                                          MarkingState::Internal(heap_object))) {
    2887             :           Object** key_slot =
    2888             :               table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
    2889       27320 :           RecordSlot(table, key_slot, *key_slot);
    2890             :           Object** value_slot =
    2891             :               table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
    2892             :           MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
    2893             :                                                          value_slot);
    2894             :         }
    2895             :       }
    2896             :     }
    2897             :     weak_collection_obj = weak_collection->next();
    2898             :   }
    2899      106789 : }
    2900             : 
    2901             : 
    2902       53346 : void MarkCompactCollector::ClearWeakCollections() {
    2903      322291 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
    2904       53346 :   Object* weak_collection_obj = heap()->encountered_weak_collections();
    2905      108907 :   while (weak_collection_obj != Smi::kZero) {
    2906             :     JSWeakCollection* weak_collection =
    2907             :         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    2908             :     DCHECK(ObjectMarking::IsBlackOrGrey(
    2909             :         weak_collection, MarkingState::Internal(weak_collection)));
    2910        2215 :     if (weak_collection->table()->IsHashTable()) {
    2911             :       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
    2912       30838 :       for (int i = 0; i < table->Capacity(); i++) {
    2913             :         HeapObject* key = HeapObject::cast(table->KeyAt(i));
    2914       13204 :         if (!ObjectMarking::IsBlackOrGrey(key, MarkingState::Internal(key))) {
    2915          39 :           table->RemoveEntry(i);
    2916             :         }
    2917             :       }
    2918             :     }
    2919             :     weak_collection_obj = weak_collection->next();
    2920        2215 :     weak_collection->set_next(heap()->undefined_value());
    2921             :   }
    2922       53346 :   heap()->set_encountered_weak_collections(Smi::kZero);
    2923       53346 : }
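                     : 
                     : // ClearWeakCollections drops the entries whose keys were never marked.
                     : // A standalone sketch of that sweep, using the same kind of stand-in
                     : // types as the sketch above (illustrative only, not the V8 API):
                     : 
                     : #include <algorithm>
                     : #include <unordered_set>
                     : #include <vector>
                     : 
                     : struct WCObj {};
                     : struct WeakEntry { WCObj* key; WCObj* value; };
                     : 
                     : // Removes every entry whose key did not survive marking.
                     : void ClearDeadEntriesSketch(std::vector<WeakEntry>* table,
                     :                             const std::unordered_set<WCObj*>& marked) {
                     :   table->erase(
                     :       std::remove_if(table->begin(), table->end(),
                     :                      [&](const WeakEntry& e) {
                     :                        return marked.count(e.key) == 0;
                     :                      }),
                     :       table->end());
                     : }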
    2924             : 
    2925             : 
    2926          52 : void MarkCompactCollector::AbortWeakCollections() {
    2927         104 :   Object* weak_collection_obj = heap()->encountered_weak_collections();
    2928         104 :   while (weak_collection_obj != Smi::kZero) {
    2929             :     JSWeakCollection* weak_collection =
    2930             :         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    2931             :     weak_collection_obj = weak_collection->next();
    2932           0 :     weak_collection->set_next(heap()->undefined_value());
    2933             :   }
    2934             :   heap()->set_encountered_weak_collections(Smi::kZero);
    2935          52 : }
    2936             : 
    2937             : 
    2938       53346 : void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
    2939             :                                           DependentCode** dependent_code_list) {
    2940      106692 :   Heap* heap = this->heap();
    2941      213384 :   TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
    2942             :   Object* weak_cell_obj = heap->encountered_weak_cells();
    2943             :   Object* the_hole_value = heap->the_hole_value();
    2944             :   DependentCode* dependent_code_head =
    2945             :       DependentCode::cast(heap->empty_fixed_array());
    2946             :   Object* non_live_map_head = Smi::kZero;
    2947    53980931 :   while (weak_cell_obj != Smi::kZero) {
    2948             :     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    2949             :     Object* next_weak_cell = weak_cell->next();
    2950             :     bool clear_value = true;
    2951             :     bool clear_next = true;
    2952             :     // We do not insert cleared weak cells into the list, so the value
    2953             :     // cannot be a Smi here.
    2954             :     HeapObject* value = HeapObject::cast(weak_cell->value());
    2955    53874239 :     if (!ObjectMarking::IsBlackOrGrey(value, MarkingState::Internal(value))) {
    2956             :       // Cells for new-space objects embedded in optimized code are wrapped in
    2957             :       // WeakCell and put into Heap::weak_object_to_code_table.
    2958             :       // Such cells do not have any strong references but we want to keep them
    2959             :       // alive as long as the cell value is alive.
    2960             :       // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
    2961     2544872 :       if (value->IsCell()) {
    2962             :         Object* cell_value = Cell::cast(value)->value();
    2963      768940 :         if (cell_value->IsHeapObject() &&
    2964             :             ObjectMarking::IsBlackOrGrey(
    2965             :                 HeapObject::cast(cell_value),
    2966             :                 MarkingState::Internal(HeapObject::cast(cell_value)))) {
    2967             :           // Resurrect the cell.
    2968             :           ObjectMarking::WhiteToBlack(value, MarkingState::Internal(value));
    2969          30 :           Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
    2970          30 :           RecordSlot(value, slot, *slot);
    2971          30 :           slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
    2972          30 :           RecordSlot(weak_cell, slot, *slot);
    2973             :           clear_value = false;
    2974             :         }
    2975             :       }
    2976     2544872 :       if (value->IsMap()) {
    2977             :         // The map is non-live.
    2978             :         Map* map = Map::cast(value);
    2979             :         // Add dependent code to the dependent_code_list.
    2980             :         DependentCode* candidate = map->dependent_code();
    2981             :         // We rely on the fact that the weak code group comes first.
    2982             :         STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
    2983      686805 :         if (candidate->length() > 0 &&
    2984             :             candidate->group() == DependentCode::kWeakCodeGroup) {
    2985             :           candidate->set_next_link(dependent_code_head);
    2986             :           dependent_code_head = candidate;
    2987             :         }
    2988             :         // Add the weak cell to the non_live_map list.
    2989      669401 :         weak_cell->set_next(non_live_map_head);
    2990             :         non_live_map_head = weak_cell;
    2991             :         clear_value = false;
    2992             :         clear_next = false;
    2993             :       }
    2994             :     } else {
    2995             :       // The value of the weak cell is alive.
    2996    51329367 :       Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
    2997    51329367 :       RecordSlot(weak_cell, slot, *slot);
    2998             :       clear_value = false;
    2999             :     }
    3000    53874239 :     if (clear_value) {
    3001             :       weak_cell->clear();
    3002             :     }
    3003    53874239 :     if (clear_next) {
    3004             :       weak_cell->clear_next(the_hole_value);
    3005             :     }
    3006             :     weak_cell_obj = next_weak_cell;
    3007             :   }
    3008             :   heap->set_encountered_weak_cells(Smi::kZero);
    3009       53346 :   *non_live_map_list = non_live_map_head;
    3010      106692 :   *dependent_code_list = dependent_code_head;
    3011       53346 : }
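                     : 
                     : // Per weak cell, the loop above picks one of three outcomes: keep the
                     : // cell (value still live, or a resurrected Cell), defer it (value is a
                     : // dead map, queued on the non_live_map list together with its dependent
                     : // code), or clear it. A compressed standalone restatement of that
                     : // decision, with hypothetical names:
                     : 
                     : enum class WeakCellAction { kKeep, kDeferDeadMap, kClear };
                     : 
                     : // Mirrors the control flow above.
                     : WeakCellAction ClassifyWeakCellSketch(bool value_is_live,
                     :                                       bool value_is_dead_map,
                     :                                       bool resurrected_cell) {
                     :   if (value_is_live || resurrected_cell) return WeakCellAction::kKeep;
                     :   if (value_is_dead_map) return WeakCellAction::kDeferDeadMap;
                     :   return WeakCellAction::kClear;
                     : }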
    3012             : 
    3013             : 
    3014          52 : void MarkCompactCollector::AbortWeakCells() {
    3015         104 :   Object* the_hole_value = heap()->the_hole_value();
    3016          52 :   Object* weak_cell_obj = heap()->encountered_weak_cells();
    3017        3341 :   while (weak_cell_obj != Smi::kZero) {
    3018             :     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    3019             :     weak_cell_obj = weak_cell->next();
    3020             :     weak_cell->clear_next(the_hole_value);
    3021             :   }
    3022             :   heap()->set_encountered_weak_cells(Smi::kZero);
    3023          52 : }
    3024             : 
    3025             : 
    3026          52 : void MarkCompactCollector::AbortTransitionArrays() {
    3027         104 :   HeapObject* undefined = heap()->undefined_value();
    3028          52 :   Object* obj = heap()->encountered_transition_arrays();
    3029         577 :   while (obj != Smi::kZero) {
    3030             :     TransitionArray* array = TransitionArray::cast(obj);
    3031             :     obj = array->next_link();
    3032             :     array->set_next_link(undefined, SKIP_WRITE_BARRIER);
    3033             :   }
    3034             :   heap()->set_encountered_transition_arrays(Smi::kZero);
    3035          52 : }
    3036             : 
    3037   346055828 : void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
    3038             :                                            Object* target) {
    3039             :   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
    3040             :   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
    3041   691304928 :   if (target_page->IsEvacuationCandidate() &&
    3042      329298 :       (rinfo->host() == NULL ||
    3043             :        !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    3044             :     RelocInfo::Mode rmode = rinfo->rmode();
    3045             :     Address addr = rinfo->pc();
    3046      238715 :     SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
    3047      238715 :     if (rinfo->IsInConstantPool()) {
    3048             :       addr = rinfo->constant_pool_entry_address();
    3049             :       if (RelocInfo::IsCodeTarget(rmode)) {
    3050             :         slot_type = CODE_ENTRY_SLOT;
    3051             :       } else {
    3052             :         DCHECK(RelocInfo::IsEmbeddedObject(rmode));
    3053             :         slot_type = OBJECT_SLOT;
    3054             :       }
    3055             :     }
    3056             :     RememberedSet<OLD_TO_OLD>::InsertTyped(
    3057      238715 :         source_page, reinterpret_cast<Address>(host), slot_type, addr);
    3058             :   }
    3059   345487814 : }
    3060             : 
    3061   330237883 : static inline SlotCallbackResult UpdateSlot(Object** slot) {
    3062             :   Object* obj = reinterpret_cast<Object*>(
    3063   330237883 :       base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
    3064             : 
    3065   330237883 :   if (obj->IsHeapObject()) {
    3066             :     HeapObject* heap_obj = HeapObject::cast(obj);
    3067             :     MapWord map_word = heap_obj->map_word();
    3068   305906021 :     if (map_word.IsForwardingAddress()) {
    3069             :       DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
    3070             :              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
    3071             :              Page::FromAddress(heap_obj->address())
    3072             :                  ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
    3073    20065884 :       HeapObject* target = map_word.ToForwardingAddress();
    3074             :       base::NoBarrier_CompareAndSwap(
    3075             :           reinterpret_cast<base::AtomicWord*>(slot),
    3076             :           reinterpret_cast<base::AtomicWord>(obj),
    3077             :           reinterpret_cast<base::AtomicWord>(target));
    3078             :       DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
    3079             :       DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
    3080             :     }
    3081             :   }
    3082   330237883 :   return REMOVE_SLOT;
    3083             : }
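                     : 
                     : // UpdateSlot publishes the forwarding address with a compare-and-swap so
                     : // that a concurrent writer (e.g. a sweeper storing a filler) is never
                     : // overwritten with a stale value. A standalone sketch of the same
                     : // pattern using std::atomic in place of V8's atomics (the names and the
                     : // forwarding field are assumptions of this sketch):
                     : 
                     : #include <atomic>
                     : 
                     : struct HeapObjSketch {
                     :   std::atomic<HeapObjSketch*> forwarding{nullptr};  // Set once moved.
                     : };
                     : 
                     : // Replaces *slot with its forwarding address, but only if the slot still
                     : // holds the value we read; losing the race means someone else updated it.
                     : void UpdateSlotSketch(std::atomic<HeapObjSketch*>* slot) {
                     :   HeapObjSketch* old_value = slot->load(std::memory_order_relaxed);
                     :   if (old_value == nullptr) return;
                     :   HeapObjSketch* target =
                     :       old_value->forwarding.load(std::memory_order_relaxed);
                     :   if (target != nullptr) {
                     :     slot->compare_exchange_strong(old_value, target,
                     :                                   std::memory_order_relaxed);
                     :   }
                     : }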
    3084             : 
    3085             : // Visitor for updating root pointers and to-space pointers.
    3086             : // It does not expect to encounter pointers to dead objects.
    3087             : // TODO(ulan): Remove code object specific functions. This visitor
    3088             : // never visits code objects.
    3089      106692 : class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
    3090             :  public:
    3091        2453 :   void VisitPointer(HeapObject* host, Object** p) override { UpdateSlot(p); }
    3092             : 
    3093    13077358 :   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    3094    13077358 :     for (Object** p = start; p < end; p++) UpdateSlot(p);
    3095    13093583 :   }
    3096             : 
    3097   134078678 :   void VisitRootPointer(Root root, Object** p) override { UpdateSlot(p); }
    3098             : 
    3099     1399565 :   void VisitRootPointers(Root root, Object** start, Object** end) override {
    3100     1399565 :     for (Object** p = start; p < end; p++) UpdateSlot(p);
    3101     1399565 :   }
    3102             : 
    3103           0 :   void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
    3104           0 :     UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
    3105           0 :   }
    3106             : 
    3107           0 :   void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    3108           0 :     UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
    3109           0 :   }
    3110             : 
    3111           0 :   void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
    3112           0 :     UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
    3113           0 :   }
    3114             : 
    3115     1183851 :   void VisitCodeEntry(JSFunction* host, Address entry_address) override {
    3116             :     UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
    3117     1183855 :   }
    3118             : 
    3119           0 :   void VisitDebugTarget(Code* host, RelocInfo* rinfo) override {
    3120           0 :     UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
    3121           0 :   }
    3122             : };
    3123             : 
    3124      924774 : static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
    3125             :                                                          Object** p) {
    3126      924774 :   MapWord map_word = HeapObject::cast(*p)->map_word();
    3127             : 
    3128      924774 :   if (map_word.IsForwardingAddress()) {
    3129        1921 :     return String::cast(map_word.ToForwardingAddress());
    3130             :   }
    3131             : 
    3132      922853 :   return String::cast(*p);
    3133             : }
    3134             : 
    3135       53346 : void MarkCompactCollector::EvacuatePrologue() {
    3136             :   // New space.
    3137       53346 :   NewSpace* new_space = heap()->new_space();
    3138             :   // Append the list of new space pages to be processed.
    3139      302372 :   for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
    3140       71167 :     new_space_evacuation_pages_.Add(p);
    3141             :   }
    3142       53346 :   new_space->Flip();
    3143       53346 :   new_space->ResetAllocationInfo();
    3144             : 
    3145             :   // Old space.
    3146             :   DCHECK(old_space_evacuation_pages_.is_empty());
    3147             :   old_space_evacuation_pages_.Swap(&evacuation_candidates_);
    3148             :   DCHECK(evacuation_candidates_.is_empty());
    3149       53346 : }
    3150             : 
    3151       53346 : void MarkCompactCollector::EvacuateEpilogue() {
    3152             :   // New space.
    3153       53346 :   heap()->new_space()->set_age_mark(heap()->new_space()->top());
    3154             :   // Old space. Deallocate evacuated candidate pages.
    3155       53346 :   ReleaseEvacuationCandidates();
    3156       53346 : }
    3157             : 
    3158             : class Evacuator : public Malloced {
    3159             :  public:
    3160             :   enum EvacuationMode {
    3161             :     kObjectsNewToOld,
    3162             :     kPageNewToOld,
    3163             :     kObjectsOldToOld,
    3164             :     kPageNewToNew,
    3165             :   };
    3166             : 
    3167             :   static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
    3168             :     // Note: The order of checks is important in this function.
    3169      161654 :     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
    3170             :       return kPageNewToOld;
    3171      160322 :     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
    3172             :       return kPageNewToNew;
    3173      159336 :     if (chunk->InNewSpace()) return kObjectsNewToOld;
    3174             :     DCHECK(chunk->IsEvacuationCandidate());
    3175             :     return kObjectsOldToOld;
    3176             :   }
    3177             : 
    3178             :   // NewSpacePages with more live bytes than this threshold qualify for fast
    3179             :   // evacuation (see the worked example after this class).
    3180             :   static int PageEvacuationThreshold() {
    3181       56454 :     if (FLAG_page_promotion)
    3182       56408 :       return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
    3183             :     return Page::kAllocatableMemory + kPointerSize;
    3184             :   }
    3185             : 
    3186       59260 :   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
    3187             :       : heap_(heap),
    3188             :         compaction_spaces_(heap_),
    3189             :         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
    3190             :         new_space_visitor_(heap_, &compaction_spaces_, record_visitor,
    3191             :                            &local_pretenuring_feedback_),
    3192             :         new_to_new_page_visitor_(heap_, record_visitor,
    3193             :                                  &local_pretenuring_feedback_),
    3194             :         new_to_old_page_visitor_(heap_, record_visitor,
    3195             :                                  &local_pretenuring_feedback_),
    3196             : 
    3197             :         old_space_visitor_(heap_, &compaction_spaces_, record_visitor),
    3198             :         duration_(0.0),
    3199      177780 :         bytes_compacted_(0) {}
    3200             : 
    3201      118520 :   virtual ~Evacuator() {}
    3202             : 
    3203             :   bool EvacuatePage(Page* page);
    3204             : 
    3205             :   // Merge back locally cached info sequentially. Note that this method needs
    3206             :   // to be called from the main thread.
    3207             :   inline void Finalize();
    3208             : 
    3209             :   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
    3210             :   AllocationInfo CloseNewSpaceLAB() { return new_space_visitor_.CloseLAB(); }
    3211             : 
    3212             :  protected:
    3213             :   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
    3214             : 
    3215             :   // |saved_live_bytes| receives the live bytes of the page that was processed.
    3216             :   virtual bool RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
    3217             : 
    3218             :   inline Heap* heap() { return heap_; }
    3219             : 
    3220             :   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
    3221       80829 :     duration_ += duration;
    3222       80829 :     bytes_compacted_ += bytes_compacted;
    3223             :   }
    3224             : 
    3225             :   Heap* heap_;
    3226             : 
    3227             :   // Locally cached collector data.
    3228             :   CompactionSpaceCollection compaction_spaces_;
    3229             :   base::HashMap local_pretenuring_feedback_;
    3230             : 
    3231             :   // Visitors for the corresponding spaces.
    3232             :   EvacuateNewSpaceVisitor new_space_visitor_;
    3233             :   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
    3234             :       new_to_new_page_visitor_;
    3235             :   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
    3236             :       new_to_old_page_visitor_;
    3237             :   EvacuateOldSpaceVisitor old_space_visitor_;
    3238             : 
    3239             :   // Bookkeeping info.
    3240             :   double duration_;
    3241             :   intptr_t bytes_compacted_;
    3242             : };
    3243             : 
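                     : // Worked example for PageEvacuationThreshold() above, using assumed
                     : // constants (the real flag default and allocatable page size may
                     : // differ): at a 70% threshold and ~500 KB of allocatable memory per
                     : // page, any new-space page with more than ~350 KB of live bytes
                     : // qualifies for wholesale promotion. With page promotion disabled the
                     : // threshold is one pointer beyond the page, i.e. never reached.
                     : 
                     : constexpr int kAllocatableMemorySketch = 500 * 1024;  // Assumed.
                     : constexpr int kPromotionThresholdPercentSketch = 70;  // Assumed.
                     : 
                     : constexpr int PageEvacuationThresholdSketch() {
                     :   return kPromotionThresholdPercentSketch * kAllocatableMemorySketch /
                     :          100;
                     : }
                     : static_assert(PageEvacuationThresholdSketch() == 358400,
                     :               "70% of 500 KB");
                     : 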
    3244      161652 : bool Evacuator::EvacuatePage(Page* page) {
    3245             :   bool success = false;
    3246             :   DCHECK(page->SweepingDone());
    3247       80826 :   intptr_t saved_live_bytes = 0;
    3248       80826 :   double evacuation_time = 0.0;
    3249             :   {
    3250             :     AlwaysAllocateScope always_allocate(heap()->isolate());
    3251             :     TimedScope timed_scope(&evacuation_time);
    3252       80830 :     success = RawEvacuatePage(page, &saved_live_bytes);
    3253             :   }
    3254       80829 :   ReportCompactionProgress(evacuation_time, saved_live_bytes);
    3255       80829 :   if (FLAG_trace_evacuation) {
    3256             :     PrintIsolate(
    3257             :         heap()->isolate(),
    3258             :         "evacuation[%p]: page=%p new_space=%d "
    3259             :         "page_evacuation=%d executable=%d contains_age_mark=%d "
    3260             :         "live_bytes=%" V8PRIdPTR " time=%f page_promotion_qualifies=%d\n",
    3261             :         static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
    3262           0 :         page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
    3263             :             page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
    3264             :         page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
    3265           0 :         page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
    3266             :         evacuation_time,
    3267           0 :         saved_live_bytes > Evacuator::PageEvacuationThreshold());
    3268             :   }
    3269       80829 :   return success;
    3270             : }
    3271             : 
    3272      474080 : void Evacuator::Finalize() {
    3273      118520 :   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
    3274       59260 :   heap()->code_space()->MergeCompactionSpace(
    3275      118520 :       compaction_spaces_.Get(CODE_SPACE));
    3276      118520 :   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
    3277      237040 :   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
    3278      177780 :                                        new_to_old_page_visitor_.moved_bytes());
    3279             :   heap()->IncrementSemiSpaceCopiedObjectSize(
    3280       59260 :       new_space_visitor_.semispace_copied_size() +
    3281      177780 :       new_to_new_page_visitor_.moved_bytes());
    3282             :   heap()->IncrementYoungSurvivorsCounter(
    3283       59260 :       new_space_visitor_.promoted_size() +
    3284       59260 :       new_space_visitor_.semispace_copied_size() +
    3285       59260 :       new_to_old_page_visitor_.moved_bytes() +
    3286       59260 :       new_to_new_page_visitor_.moved_bytes());
    3287      118520 :   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
    3288       59260 : }
    3289             : 
    3290      118520 : class FullEvacuator : public Evacuator {
    3291             :  public:
    3292             :   FullEvacuator(MarkCompactCollector* collector,
    3293             :                 RecordMigratedSlotVisitor* record_visitor)
    3294       59260 :       : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
    3295             : 
    3296             :  protected:
    3297             :   bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
    3298             : 
    3299             :   MarkCompactCollector* collector_;
    3300             : };
    3301             : 
    3302       80822 : bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
    3303             :   bool success = false;
    3304             :   LiveObjectVisitor object_visitor;
    3305       80822 :   const MarkingState state = collector_->marking_state(page);
    3306       80822 :   *live_bytes = state.live_bytes();
    3307       80822 :   switch (ComputeEvacuationMode(page)) {
    3308             :     case kObjectsNewToOld:
    3309             :       success = object_visitor.VisitBlackObjects(
    3310       70001 :           page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
    3311             :       DCHECK(success);
    3312             :       ArrayBufferTracker::ProcessBuffers(
    3313       70010 :           page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
    3314       70009 :       break;
    3315             :     case kPageNewToOld:
    3316             :       success = object_visitor.VisitBlackObjects(
    3317             :           page, state, &new_to_old_page_visitor_,
    3318         668 :           LiveObjectVisitor::kKeepMarking);
    3319             :       DCHECK(success);
    3320         668 :       new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
    3321             :       // ArrayBufferTracker will be updated during sweeping.
    3322             :       break;
    3323             :     case kPageNewToNew:
    3324             :       success = object_visitor.VisitBlackObjects(
    3325             :           page, state, &new_to_new_page_visitor_,
    3326         489 :           LiveObjectVisitor::kKeepMarking);
    3327             :       DCHECK(success);
    3328         486 :       new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
    3329             :       // ArrayBufferTracker will be updated during sweeping.
    3330             :       break;
    3331             :     case kObjectsOldToOld:
    3332             :       success = object_visitor.VisitBlackObjects(
    3333        9661 :           page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits);
    3334        9665 :       if (!success) {
    3335             :         // Aborted compaction page. We have to record slots here, since we
    3336             :         // might not have recorded them in the first place.
    3337             :         // Note: We mark the page as aborted here to be able to record slots
    3338             :         // for code objects in |RecordMigratedSlotVisitor|.
    3339             :         page->SetFlag(Page::COMPACTION_WAS_ABORTED);
    3340          52 :         EvacuateRecordOnlyVisitor record_visitor(heap());
    3341             :         success = object_visitor.VisitBlackObjects(
    3342          52 :             page, state, &record_visitor, LiveObjectVisitor::kKeepMarking);
    3343             :         ArrayBufferTracker::ProcessBuffers(
    3344          52 :             page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
    3345             :         DCHECK(success);
    3346             :         // We need to return failure here to indicate that we want this page
    3347             :         // added to the sweeper.
    3348             :         success = false;
    3349             :       } else {
    3350             :         ArrayBufferTracker::ProcessBuffers(
    3351        9613 :             page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
    3352             :       }
    3353             :       break;
    3354             :   }
    3355       80831 :   return success;
    3356             : }
    3357             : 
    3358             : class EvacuationJobTraits {
    3359             :  public:
    3360             :   typedef int* PerPageData;  // Pointer to number of aborted pages.
    3361             :   typedef Evacuator* PerTaskData;
    3362             : 
    3363             :   static const bool NeedSequentialFinalization = true;
    3364             : 
    3365             :   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
    3366             :                                     MemoryChunk* chunk, PerPageData) {
    3367       80831 :     return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
    3368             :   }
    3369             : 
    3370       80832 :   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
    3371             :                                        bool success, PerPageData data) {
    3372             :     Page* p = static_cast<Page*>(chunk);
    3373       80832 :     switch (Evacuator::ComputeEvacuationMode(p)) {
    3374             :       case Evacuator::kPageNewToOld:
    3375             :         break;
    3376             :       case Evacuator::kPageNewToNew:
    3377             :         DCHECK(success);
    3378             :         break;
    3379             :       case Evacuator::kObjectsNewToOld:
    3380             :         DCHECK(success);
    3381             :         break;
    3382             :       case Evacuator::kObjectsOldToOld:
    3383        9665 :         if (success) {
    3384             :           DCHECK(p->IsEvacuationCandidate());
    3385             :           DCHECK(p->SweepingDone());
    3386        9613 :           p->Unlink();
    3387             :         } else {
    3388             :           // We have partially compacted the page, i.e., some objects may have
    3389             :           // moved, others are still in place.
    3390             :           p->ClearEvacuationCandidate();
    3391             :           // Slots have already been recorded so we just need to add it to the
    3392             :           // sweeper, which will happen after updating pointers.
    3393          52 :           *data += 1;
    3394             :         }
    3395             :         break;
    3396             :       default:
    3397           0 :         UNREACHABLE();
    3398             :     }
    3399       80832 :   }
    3400             : };
    3401             : 
    3402       53346 : void MarkCompactCollector::EvacuatePagesInParallel() {
    3403             :   PageParallelJob<EvacuationJobTraits> job(
    3404             :       heap_, heap_->isolate()->cancelable_task_manager(),
    3405      106692 :       &page_parallel_job_semaphore_);
    3406             : 
    3407       53346 :   int abandoned_pages = 0;
    3408             :   intptr_t live_bytes = 0;
    3409      116357 :   for (Page* page : old_space_evacuation_pages_) {
    3410        9665 :     live_bytes += MarkingState::Internal(page).live_bytes();
    3411             :     job.AddPage(page, &abandoned_pages);
    3412             :   }
    3413             : 
    3414      125769 :   const bool reduce_memory = heap()->ShouldReduceMemory();
    3415       53346 :   const Address age_mark = heap()->new_space()->age_mark();
    3416      177859 :   for (Page* page : new_space_evacuation_pages_) {
    3417             :     intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes();
    3418       71167 :     live_bytes += live_bytes_on_page;
    3419      184075 :     if (!reduce_memory && !page->NeverEvacuate() &&
    3420       57932 :         (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) &&
    3421       72330 :         !page->Contains(age_mark) &&
    3422        1163 :         heap()->CanExpandOldGeneration(live_bytes_on_page)) {
    3423        1157 :       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
    3424         668 :         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
    3425             :       } else {
    3426         489 :         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
    3427             :       }
    3428             :     }
    3429             : 
    3430             :     job.AddPage(page, &abandoned_pages);
    3431             :   }
    3432             :   DCHECK_GE(job.NumberOfPages(), 1);
    3433             : 
    3434             :   // Used for trace summary.
    3435             :   double compaction_speed = 0;
    3436       53346 :   if (FLAG_trace_evacuation) {
    3437           0 :     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    3438             :   }
    3439             : 
    3440             :   const int wanted_num_tasks =
    3441       53346 :       NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
    3442       53346 :   FullEvacuator** evacuators = new FullEvacuator*[wanted_num_tasks];
    3443             :   RecordMigratedSlotVisitor record_visitor(this);
    3444      112606 :   for (int i = 0; i < wanted_num_tasks; i++) {
    3445       59260 :     evacuators[i] = new FullEvacuator(this, &record_visitor);
    3446             :   }
    3447      112606 :   job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
    3448       53346 :   const Address top = heap()->new_space()->top();
    3449      112606 :   for (int i = 0; i < wanted_num_tasks; i++) {
    3450       59260 :     evacuators[i]->Finalize();
    3451             :     // Try to find the last LAB that was used for new space allocation in
    3452             :     // evacuation tasks. If it was adjacent to the current top, move top back.
    3453       59260 :     const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB();
    3454       59260 :     if (info.limit() != nullptr && info.limit() == top) {
    3455             :       DCHECK_NOT_NULL(info.top());
    3456       17914 :       *heap()->new_space()->allocation_top_address() = info.top();
    3457             :     }
    3458       59260 :     delete evacuators[i];
    3459             :   }
    3460       53346 :   delete[] evacuators;
    3461             : 
    3462       53346 :   if (FLAG_trace_evacuation) {
    3463             :     PrintIsolate(isolate(),
    3464             :                  "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
    3465             :                  "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
    3466             :                  " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
    3467             :                  isolate()->time_millis_since_init(),
    3468             :                  FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
    3469             :                  abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
    3470           0 :                  V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
    3471           0 :                  live_bytes, compaction_speed);
    3472             :   }
    3473       53346 : }
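                     : 
                     : // The page-promotion decision in the loop above combines five
                     : // conditions. A standalone predicate capturing them (hypothetical
                     : // names; a sketch of the logic above, not the V8 API):
                     : 
                     : #include <cstdint>
                     : 
                     : // A new-space page is moved wholesale only when memory use is not being
                     : // reduced, the page may be evacuated, it is dense enough, it does not
                     : // hold the age mark, and the old generation can absorb its live bytes.
                     : bool ShouldMovePageSketch(bool reduce_memory, bool never_evacuate,
                     :                           int64_t live_bytes, int64_t threshold,
                     :                           bool contains_age_mark,
                     :                           bool can_expand_old_generation) {
                     :   return !reduce_memory && !never_evacuate && live_bytes > threshold &&
                     :          !contains_age_mark && can_expand_old_generation;
                     : }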
    3474             : 
    3475       53346 : class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
    3476             :  public:
    3477      106692 :   virtual Object* RetainAs(Object* object) {
    3478      106692 :     if (object->IsHeapObject()) {
    3479             :       HeapObject* heap_object = HeapObject::cast(object);
    3480             :       MapWord map_word = heap_object->map_word();
    3481      106692 :       if (map_word.IsForwardingAddress()) {
    3482         944 :         return map_word.ToForwardingAddress();
    3483             :       }
    3484             :     }
    3485      105748 :     return object;
    3486             :   }
    3487             : };
    3488             : 
    3489             : MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
    3490      479988 : MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
    3491      479988 :   AllocationSpace identity = p->owner()->identity();
    3492      702692 :   if (p->slot_set<OLD_TO_NEW>() &&
    3493      222610 :       (identity == OLD_SPACE || identity == MAP_SPACE)) {
    3494             :     return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
    3495      257749 :   } else if (p->typed_slot_set<OLD_TO_NEW>() && identity == CODE_SPACE) {
    3496             :     return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
    3497             :   }
    3498      255527 :   return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
    3499             : }
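                     : 
                     : // The mode above reads as a small decision table: regular old-to-new
                     : // slots are filtered only for OLD_SPACE and MAP_SPACE pages, typed
                     : // slots only for CODE_SPACE, and anything else needs no clearing. A
                     : // standalone restatement with hypothetical enums:
                     : 
                     : enum class SpaceKindSketch { kOld, kMap, kCode, kOther };
                     : enum class ClearModeSketch { kRegular, kTyped, kNone };
                     : 
                     : ClearModeSketch GetClearModeSketch(bool has_slot_set,
                     :                                    bool has_typed_slot_set,
                     :                                    SpaceKindSketch identity) {
                     :   if (has_slot_set && (identity == SpaceKindSketch::kOld ||
                     :                        identity == SpaceKindSketch::kMap)) {
                     :     return ClearModeSketch::kRegular;
                     :   }
                     :   if (has_typed_slot_set && identity == SpaceKindSketch::kCode) {
                     :     return ClearModeSketch::kTyped;
                     :   }
                     :   return ClearModeSketch::kNone;
                     : }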
    3500             : 
    3501      480233 : int MarkCompactCollector::Sweeper::RawSweep(
    3502             :     Page* p, FreeListRebuildingMode free_list_mode,
    3503             :     FreeSpaceTreatmentMode free_space_mode) {
    3504     2474198 :   Space* space = p->owner();
    3505             :   DCHECK_NOT_NULL(space);
    3506             :   DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
    3507             :          space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
    3508             :   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
    3509             : 
    3510             :   // If there are old-to-new slots in that page, we have to filter out slots
    3511             :   // that lie in dead memory, which is freed by the sweeper.
    3512      480012 :   ClearOldToNewSlotsMode slots_clearing_mode = GetClearOldToNewSlotsMode(p);
    3513             : 
    3514             :   // The free ranges map is used for filtering typed slots.
    3515             :   std::map<uint32_t, uint32_t> free_ranges;
    3516             : 
    3517             :   // Before we sweep objects on the page, we free dead array buffers which
    3518             :   // requires valid mark bits.
    3519      480123 :   ArrayBufferTracker::FreeDead(p);
    3520             : 
    3521             :   Address free_start = p->area_start();
    3522             :   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
    3523             : 
    3524             :   // If we use the skip list for code space pages, we have to lock the skip
    3525             :   // list because it could be accessed concurrently by the runtime or the
    3526             :   // deoptimizer.
    3527             :   const bool rebuild_skip_list =
    3528      678092 :       space->identity() == CODE_SPACE && p->skip_list() != nullptr;
    3529             :   SkipList* skip_list = p->skip_list();
    3530      480068 :   if (rebuild_skip_list) {
    3531             :     skip_list->Clear();
    3532             :   }
    3533             : 
    3534             :   intptr_t freed_bytes = 0;
    3535             :   intptr_t max_freed_bytes = 0;
    3536             :   int curr_region = -1;
    3537             : 
    3538      480068 :   LiveObjectIterator<kBlackObjects> it(p, MarkingState::Internal(p));
    3539             :   HeapObject* object = NULL;
    3540             : 
    3541   633900613 :   while ((object = it.Next()) != NULL) {
    3542             :     DCHECK(ObjectMarking::IsBlack(object, MarkingState::Internal(object)));
    3543   633462851 :     Address free_end = object->address();
    3544   633462851 :     if (free_end != free_start) {
    3545    33694981 :       CHECK_GT(free_end, free_start);
    3546    33694981 :       size_t size = static_cast<size_t>(free_end - free_start);
    3547    33694981 :       if (free_space_mode == ZAP_FREE_SPACE) {
    3548             :         memset(free_start, 0xcc, size);
    3549             :       }
    3550    33698226 :       if (free_list_mode == REBUILD_FREE_LIST) {
    3551             :         freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
    3552    33707283 :             free_start, size);
    3553             :         max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    3554             :       } else {
    3555             :         p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
    3556      271528 :                                         ClearRecordedSlots::kNo);
    3557             :       }
    3558             : 
    3559    33848285 :       if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
    3560             :         RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
    3561    32967821 :                                                SlotSet::KEEP_EMPTY_BUCKETS);
    3562      880464 :       } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
    3563             :         free_ranges.insert(std::pair<uint32_t, uint32_t>(
    3564       41721 :             static_cast<uint32_t>(free_start - p->address()),
    3565      125163 :             static_cast<uint32_t>(free_end - p->address())));
    3566             :       }
    3567             :     }
    3568             :     Map* map = object->synchronized_map();
    3569   633537834 :     int size = object->SizeFromMap(map);
    3570   632940067 :     if (rebuild_skip_list) {
    3571             :       int new_region_start = SkipList::RegionNumber(free_end);
    3572             :       int new_region_end =
    3573    74239320 :           SkipList::RegionNumber(free_end + size - kPointerSize);
    3574    74239320 :       if (new_region_start != curr_region || new_region_end != curr_region) {
    3575             :         skip_list->AddObject(free_end, size);
    3576             :         curr_region = new_region_end;
    3577             :       }
    3578             :     }
    3579   632940067 :     free_start = free_end + size;
    3580             :   }
    3581             : 
    3582      480403 :   if (free_start != p->area_end()) {
    3583      472425 :     CHECK_GT(p->area_end(), free_start);
    3584      472425 :     size_t size = static_cast<size_t>(p->area_end() - free_start);
    3585      472425 :     if (free_space_mode == ZAP_FREE_SPACE) {
    3586             :       memset(free_start, 0xcc, size);
    3587             :     }
    3588      472426 :     if (free_list_mode == REBUILD_FREE_LIST) {
    3589             :       freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
    3590      472088 :           free_start, size);
    3591             :       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    3592             :     } else {
    3593             :       p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
    3594         724 :                                       ClearRecordedSlots::kNo);
    3595             :     }
    3596             : 
    3597      472452 :     if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
    3598             :       RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
    3599      217057 :                                              SlotSet::KEEP_EMPTY_BUCKETS);
    3600      255395 :     } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
    3601             :       free_ranges.insert(std::pair<uint32_t, uint32_t>(
    3602        2219 :           static_cast<uint32_t>(free_start - p->address()),
    3603        6657 :           static_cast<uint32_t>(p->area_end() - p->address())));
    3604             :     }
    3605             :   }
    3606             : 
    3607             :   // Clear invalid typed slots after collecting all free ranges.
    3608      480386 :   if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
    3609             :     TypedSlotSet* typed_slot_set = p->typed_slot_set<OLD_TO_NEW>();
    3610        2228 :     if (typed_slot_set != nullptr) {
    3611        2228 :       typed_slot_set->RemoveInvaldSlots(free_ranges);
    3612             :     }
    3613             :   }
    3614             : 
    3615             :   // Clear the mark bits of that page and reset live bytes count.
    3616             :   MarkingState::Internal(p).ClearLiveness();
    3617             : 
    3618             :   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
    3619      480377 :   if (free_list_mode == IGNORE_FREE_LIST) return 0;
    3620      959468 :   return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
    3621             : }
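                     : 
                     : // The core of RawSweep is one walk over a page's black objects: each
                     : // gap between consecutive live objects becomes free-list memory, and
                     : // the largest gap bounds what an allocation from this page's free list
                     : // can guarantee. A simplified standalone model over sorted
                     : // (offset, size) pairs (illustrative, not the V8 API):
                     : 
                     : #include <algorithm>
                     : #include <cstddef>
                     : #include <utility>
                     : #include <vector>
                     : 
                     : // |live| holds live objects as sorted, non-overlapping (offset, size)
                     : // pairs within [0, area_size); returns the largest contiguous free gap.
                     : size_t RawSweepSketch(
                     :     const std::vector<std::pair<size_t, size_t>>& live,
                     :     size_t area_size) {
                     :   size_t free_start = 0;
                     :   size_t max_freed = 0;
                     :   for (const auto& obj : live) {
                     :     max_freed = std::max(max_freed, obj.first - free_start);
                     :     free_start = obj.first + obj.second;  // Skip the live object.
                     :   }
                     :   return std::max(max_freed, area_size - free_start);
                     : }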
    3622             : 
    3623      422165 : void MarkCompactCollector::InvalidateCode(Code* code) {
    3624      422165 :   Page* page = Page::FromAddress(code->address());
    3625      422165 :   Address start = code->instruction_start();
    3626      422165 :   Address end = code->address() + code->Size();
    3627             : 
    3628      422165 :   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
    3629             : 
    3630      844336 :   if (heap_->incremental_marking()->IsCompacting() &&
    3631             :       !ShouldSkipEvacuationSlotRecording(code)) {
    3632             :     DCHECK(compacting_);
    3633             : 
    3634             :     // If the object is white, then no slots were recorded on it yet.
    3635      422169 :     if (ObjectMarking::IsWhite(code, MarkingState::Internal(code))) return;
    3636             : 
    3637             :     // Ignore all slots that might have been recorded in the body of the
    3638             :     // deoptimized code object. Assumption: no slots will be recorded for
    3639             :     // this object after invalidating it.
    3640           1 :     RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
    3641             :   }
    3642             : }
    3643             : 
    3644             : 
    3645             : // Return true if the given code is deoptimized or will be deoptimized.
    3646           0 : bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
    3647           0 :   return code->is_optimized_code() && code->marked_for_deoptimization();
    3648             : }
    3649             : 
    3650           0 : void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
    3651           0 :   EvacuateRecordOnlyVisitor visitor(heap());
    3652             :   LiveObjectVisitor object_visitor;
    3653             :   object_visitor.VisitBlackObjects(page, MarkingState::Internal(page), &visitor,
    3654           0 :                                    LiveObjectVisitor::kKeepMarking);
    3655           0 : }
    3656             : 
    3657             : template <class Visitor>
    3658       80859 : bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
    3659             :                                           const MarkingState& state,
    3660             :                                           Visitor* visitor,
    3661             :                                           IterationMode iteration_mode) {
    3662       80859 :   LiveObjectIterator<kBlackObjects> it(chunk, state);
    3663             :   HeapObject* object = nullptr;
    3664    35191440 :   while ((object = it.Next()) != nullptr) {
    3665             :     DCHECK(ObjectMarking::IsBlack(object, state));
    3666    34012755 :     if (!visitor->Visit(object)) {
    3667          52 :       if (iteration_mode == kClearMarkbits) {
    3668          52 :         state.bitmap()->ClearRange(
    3669             :             chunk->AddressToMarkbitIndex(chunk->area_start()),
    3670          52 :             chunk->AddressToMarkbitIndex(object->address()));
    3671             :         SlotSet* slot_set = chunk->slot_set<OLD_TO_NEW>();
    3672          52 :         if (slot_set != nullptr) {
    3673           7 :           slot_set->RemoveRange(
    3674             :               0, static_cast<int>(object->address() - chunk->address()),
    3675           7 :               SlotSet::PREFREE_EMPTY_BUCKETS);
    3676             :         }
    3677          52 :         RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(),
    3678             :                                                     object->address());
    3679          52 :         RecomputeLiveBytes(chunk, state);
    3680             :       }
    3681             :       return false;
    3682             :     }
    3683             :   }
    3684       80826 :   if (iteration_mode == kClearMarkbits) {
    3685             :     state.ClearLiveness();
    3686             :   }
    3687             :   return true;
    3688             : }
    3689             : 
    3690          52 : void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
    3691          52 :                                            const MarkingState& state) {
    3692          52 :   LiveObjectIterator<kBlackObjects> it(chunk, state);
    3693             :   int new_live_size = 0;
    3694             :   HeapObject* object = nullptr;
    3695         786 :   while ((object = it.Next()) != nullptr) {
    3696         682 :     new_live_size += object->Size();
    3697             :   }
    3698             :   state.SetLiveBytes(new_live_size);
    3699          52 : }
    3700             : 
    3701           0 : void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
    3702             :                                                      Page* page) {
    3703           0 :   base::LockGuard<base::Mutex> guard(&mutex_);
    3704           0 :   swept_list_[space->identity()].Add(page);
    3705           0 : }
    3706             : 
    3707       53346 : void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
    3708      640152 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
    3709             :   Heap::RelocationLock relocation_lock(heap());
    3710             : 
    3711             :   {
    3712      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
    3713      106692 :     EvacuatePrologue();
    3714             :   }
    3715             : 
    3716             :   {
    3717      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
    3718             :     EvacuationScope evacuation_scope(this);
    3719      106692 :     EvacuatePagesInParallel();
    3720             :   }
    3721             : 
    3722       53346 :   UpdatePointersAfterEvacuation();
    3723             : 
    3724             :   {
    3725      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
    3726       53346 :     if (!heap()->new_space()->Rebalance()) {
    3727           0 :       FatalProcessOutOfMemory("NewSpace::Rebalance");
    3728       53346 :     }
    3729             :   }
    3730             : 
    3731             :   // Give pages that are queued to be freed back to the OS. Note that filtering
    3732             :   // slots only handles old space (for unboxed doubles), and thus map space can
    3733             :   // still contain stale pointers. We only free the chunks after pointer updates
    3734             :   // to still have access to page headers.
    3735       53346 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    3736             : 
    3737             :   {
    3738      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
    3739             : 
    3740      177859 :     for (Page* p : new_space_evacuation_pages_) {
    3741       71167 :       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
    3742             :         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
    3743         489 :         sweeper().AddPage(p->owner()->identity(), p);
    3744       70678 :       } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
    3745             :         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
    3746             :         p->ForAllFreeListCategories(
    3747             :             [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
    3748         668 :         sweeper().AddPage(p->owner()->identity(), p);
    3749             :       }
    3750             :       // code objects from a non-updated pc pointing into an evacuation candidate.
    3751             :     new_space_evacuation_pages_.Rewind(0);
    3752             : 
    3753      116357 :     for (Page* p : old_space_evacuation_pages_) {
    3754             :       // Important: skip list should be cleared only after roots were updated
    3755             :       // because root iteration traverses the stack and might have to find
    3756             :       // code objects from non-updated pc pointing into evacuation candidate.
    3757        9665 :       SkipList* list = p->skip_list();
    3758        9665 :       if (list != NULL) list->Clear();
    3759        9665 :       if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
    3760          52 :         sweeper().AddPage(p->owner()->identity(), p);
    3761             :         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
    3762             :       }
    3763       53346 :     }
    3764             :   }
    3765             : 
    3766             :   {
    3767      213384 :     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
    3768      106692 :     EvacuateEpilogue();
    3769       53346 :   }
    3770             : 
    3771             : #ifdef VERIFY_HEAP
    3772             :   if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
    3773             :     FullEvacuationVerifier verifier(heap());
    3774             :     verifier.Run();
    3775             :   }
    3776             : #endif
    3777       53346 : }
    3778             : 
    3779             : template <RememberedSetType type>
    3780             : class PointerUpdateJobTraits {
    3781             :  public:
    3782             :   typedef int PerPageData;  // Per page data is not used in this job.
    3783             :   typedef int PerTaskData;  // Per task data is not used in this job.
    3784             : 
    3785      237720 :   static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
    3786             :                                     PerPageData) {
    3787             :     UpdateUntypedPointers(heap, chunk);
    3788             :     UpdateTypedPointers(heap, chunk);
    3789      237650 :     return true;
    3790             :   }
    3791             :   static const bool NeedSequentialFinalization = false;
    3792             :   static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
    3793             :   }
    3794             : 
    3795             :  private:
    3796             :   static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
    3797             :     if (type == OLD_TO_NEW) {
    3798      229079 :       RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
    3799    46241833 :         return CheckAndUpdateOldToNewSlot(heap, slot);
    3800    46241833 :       });
    3801             :     } else {
    3802        8641 :       RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
    3803             :         return UpdateSlot(reinterpret_cast<Object**>(slot));
    3804     9752507 :       });
    3805             :     }
    3806             :   }
    3807             : 
    3808             :   static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
    3809             :     if (type == OLD_TO_OLD) {
    3810             :       Isolate* isolate = heap->isolate();
    3811        8642 :       RememberedSet<OLD_TO_OLD>::IterateTyped(
    3812             :           chunk,
    3813             :           [isolate](SlotType slot_type, Address host_addr, Address slot) {
    3814             :             return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
    3815      242984 :                                                           slot, UpdateSlot);
    3816      242984 :           });
    3817             :     } else {
    3818             :       Isolate* isolate = heap->isolate();
    3819      228998 :       RememberedSet<OLD_TO_NEW>::IterateTyped(
    3820             :           chunk,
    3821             :           [isolate, heap](SlotType slot_type, Address host_addr, Address slot) {
    3822             :             return UpdateTypedSlotHelper::UpdateTypedSlot(
    3823             :                 isolate, slot_type, slot, [heap](Object** slot) {
    3824             :                   return CheckAndUpdateOldToNewSlot(
    3825      201233 :                       heap, reinterpret_cast<Address>(slot));
    3826      402285 :                 });
    3827      201052 :           });
    3828             :     }
    3829             :   }
    3830             : 
    3831    46440274 :   static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
    3832             :                                                        Address slot_address) {
    3833             :     // There may be concurrent action on slots in dead objects. Concurrent
    3834             :     // sweeper threads may overwrite the slot content with a free space object.
    3835             :     // Moreover, the pointed-to object may also get concurrently overwritten
    3836             :     // with a free space object. The sweeper always gets priority performing
    3837             :     // these writes.
    3838             :     base::NoBarrierAtomicValue<Object*>* slot =
    3839             :         base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
    3840             :     Object* slot_reference = slot->Value();
    3841    46199293 :     if (heap->InFromSpace(slot_reference)) {
    3842             :       HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
    3843             :       DCHECK(heap_object->IsHeapObject());
    3844             :       MapWord map_word = heap_object->map_word();
    3845             :       // There could still be stale pointers in large object space, map space,
    3846             :       // and old space for pages that have been promoted.
    3847    36373817 :       if (map_word.IsForwardingAddress()) {
    3848             :         // A sweeper thread may concurrently write a size value which looks like
    3849             :         // a forwarding pointer. We have to ignore these values.
    3850    30805738 :         if (map_word.ToRawValue() < Page::kPageSize) {
    3851             :           return REMOVE_SLOT;
    3852             :         }
    3853             :         // Update the corresponding slot only if the slot content did not
    3854             :         // change in the meantime. This may happen when a concurrent sweeper
    3855             :         // thread stored a free space object at that memory location.
    3856    30873231 :         slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
    3857             :       }
    3858             :       // If the object was in from space before the callback ran and is in
    3859             :       // to space afterwards, the object is still live. Unfortunately, we
    3860             :       // know nothing about the slot itself: it could be located inside a
    3861             :       // just-freed free space object.
    3862    36951118 :       if (heap->InToSpace(slot->Value())) {
    3863             :         return KEEP_SLOT;
    3864             :       }
    3865     9825476 :     } else if (heap->InToSpace(slot_reference)) {
    3866             :       // Slots can point to "to" space if the page has been moved, or if the
    3867             :       // slot has been recorded multiple times in the remembered set. Since
    3868             :       // there is no forwarding information present, we need to check the
    3869             :       // markbits to determine liveness.
    3870             :       HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
    3871       97651 :       if (ObjectMarking::IsBlack(heap_object,
    3872             :                                  MarkingState::Internal(heap_object)))
    3873             :         return KEEP_SLOT;
    3874             :     } else {
    3875             :       DCHECK(!heap->InNewSpace(slot_reference));
    3876             :     }
    3877             :     return REMOVE_SLOT;
    3878             :   }
    3879             : };
    3880             : 
    3881      106692 : int NumberOfPointerUpdateTasks(int pages) {
    3882      106692 :   if (!FLAG_parallel_pointer_update) return 1;
    3883             :   const int available_cores = Max(
    3884             :       1, static_cast<int>(
    3885      106450 :              V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
    3886             :   const int kPagesPerTask = 4;
    3887      212900 :   return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
    3888             : }
    3889             : 
    3890             : template <RememberedSetType type>
    3891      106692 : void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
    3892             :   PageParallelJob<PointerUpdateJobTraits<type> > job(
    3893      106692 :       heap, heap->isolate()->cancelable_task_manager(), semaphore);
    3894      106692 :   RememberedSet<type>::IterateMemoryChunks(
    3895      475514 :       heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
    3896      106692 :   int num_pages = job.NumberOfPages();
    3897      106692 :   int num_tasks = NumberOfPointerUpdateTasks(num_pages);
    3898      106692 :   job.Run(num_tasks, [](int i) { return 0; });
    3899      106692 : }
    3900             : 
    3901             : class ToSpacePointerUpdateJobTraits {
    3902             :  public:
    3903             :   typedef std::pair<Address, Address> PerPageData;
    3904             :   typedef PointersUpdatingVisitor* PerTaskData;
    3905             : 
    3906       54211 :   static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
    3907             :                                     MemoryChunk* chunk, PerPageData limits) {
    3908       54211 :     if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
    3909             :       // New->new promoted pages contain garbage, so they require iteration
    3910             :       // using markbits.
    3911         489 :       ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
    3912             :     } else {
    3913       53722 :       ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
    3914             :     }
    3915       54211 :     return true;
    3916             :   }
    3917             : 
    3918             :   static const bool NeedSequentialFinalization = false;
    3919             :   static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
    3920             :   }
    3921             : 
    3922             :  private:
    3923       75212 :   static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
    3924             :                                             MemoryChunk* chunk,
    3925             :                                             PerPageData limits) {
    3926    15185902 :     for (Address cur = limits.first; cur < limits.second;) {
    3927    15056968 :       HeapObject* object = HeapObject::FromAddress(cur);
    3928             :       Map* map = object->map();
    3929    15056968 :       int size = object->SizeFromMap(map);
    3930    15070454 :       object->IterateBody(map->instance_type(), size, visitor);
    3931    15035478 :       cur += size;
    3932             :     }
    3933       53722 :   }
    3934             : 
    3935         488 :   static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
    3936             :                                              MemoryChunk* chunk,
    3937             :                                              PerPageData limits) {
    3938         488 :     LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk));
    3939             :     HeapObject* object = NULL;
    3940     1031983 :     while ((object = it.Next()) != NULL) {
    3941             :       Map* map = object->map();
    3942     1032137 :       int size = object->SizeFromMap(map);
    3943     1028171 :       object->IterateBody(map->instance_type(), size, visitor);
    3944             :     }
    3945         489 :   }
    3946             : };
    3947             : 
    3948      160038 : void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
    3949             :   PageParallelJob<ToSpacePointerUpdateJobTraits> job(
    3950       53346 :       heap, heap->isolate()->cancelable_task_manager(), semaphore);
    3951             :   Address space_start = heap->new_space()->bottom();
    3952             :   Address space_end = heap->new_space()->top();
    3953      268460 :   for (Page* page : PageRange(space_start, space_end)) {
    3954             :     Address start =
    3955       55076 :         page->Contains(space_start) ? space_start : page->area_start();
    3956       54211 :     Address end = page->Contains(space_end) ? space_end : page->area_end();
    3957             :     job.AddPage(page, std::make_pair(start, end));
    3958             :   }
    3959       53346 :   PointersUpdatingVisitor visitor;
    3960       53346 :   int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
    3961      107359 :   job.Run(num_tasks, [&visitor](int i) { return &visitor; });
    3962       53346 : }
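
// A worked example of the per-page clamping above (layout hypothetical): if
// new space spans pages P0..P2 with bottom() inside P0 and top() inside P2,
// then P0 is given [bottom(), P0.area_end()), P1 its full area, and P2 is
// given [P2.area_start(), top()), so boundary pages are clamped to the range
// that actually contains allocated objects.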
    3963             : 
    3964       53346 : void MarkCompactCollector::UpdatePointersAfterEvacuation() {
    3965      426768 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
    3966             : 
    3968             :   {
    3969      213384 :     TRACE_GC(heap()->tracer(),
    3970             :              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
    3971       53346 :     UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
    3972             :     // Update roots.
    3973       53346 :     PointersUpdatingVisitor updating_visitor;
    3974       53346 :     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
    3975      106692 :     UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
    3976             :   }
    3977             : 
    3978             :   {
    3979       53346 :     Heap* heap = this->heap();
    3980      213384 :     TRACE_GC(heap->tracer(),
    3981             :              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
    3982      106692 :     UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
    3983             :   }
    3984             : 
    3985             :   {
    3986      213384 :     TRACE_GC(heap()->tracer(),
    3987             :              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
    3988             :     // Update pointers from external string table.
    3989             :     heap_->UpdateReferencesInExternalStringTable(
    3990       53346 :         &UpdateReferenceInExternalStringTableEntry);
    3991             : 
    3992       53346 :     EvacuationWeakObjectRetainer evacuation_object_retainer;
    3993      106692 :     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
    3994       53346 :   }
    3995       53346 : }
    3996             : 
    3997             : 
    3998       53346 : void MarkCompactCollector::ReleaseEvacuationCandidates() {
    3999      116357 :   for (Page* p : old_space_evacuation_pages_) {
    4000        9665 :     if (!p->IsEvacuationCandidate()) continue;
    4001        9613 :     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    4002             :     MarkingState::Internal(p).SetLiveBytes(0);
    4003        9613 :     CHECK(p->SweepingDone());
    4004        9613 :     space->ReleasePage(p);
    4005             :   }
    4006             :   old_space_evacuation_pages_.Rewind(0);
    4007       53346 :   compacting_ = false;
    4008       53346 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    4009       53346 : }
    4010             : 
    4011      888276 : int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
    4012             :                                                       int required_freed_bytes,
    4013             :                                                       int max_pages) {
    4014             :   int max_freed = 0;
    4015             :   int pages_freed = 0;
    4016             :   Page* page = nullptr;
    4017     2224683 :   while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    4018      480271 :     int freed = ParallelSweepPage(page, identity);
    4019      480284 :     pages_freed += 1;
    4020             :     DCHECK_GE(freed, 0);
    4021             :     max_freed = Max(max_freed, freed);
    4022      480284 :     if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
    4023             :       return max_freed;
    4024      451843 :     if (max_pages > 0 && pages_freed >= max_pages) return max_freed;
    4025             :   }
    4026             :   return max_freed;
    4027             : }
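
// Hypothetical call shapes for the loop above: ParallelSweepSpace(OLD_SPACE,
// 32 * 1024, 0) keeps sweeping until a single page frees at least 32 KB,
// while ParallelSweepSpace(OLD_SPACE, 0, 1) sweeps at most one page; with
// both limits zero it drains the whole sweeping list for the space.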
    4028             : 
    4029      482484 : int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
    4030             :                                                      AllocationSpace identity) {
    4031             :   int max_freed = 0;
    4032             :   {
    4033      482484 :     base::LockGuard<base::RecursiveMutex> guard(page->mutex());
    4034             :     // If this page was already swept in the meantime, we can return here.
    4035      482481 :     if (page->SweepingDone()) return 0;
    4036             :     DCHECK_EQ(Page::kSweepingPending,
    4037             :               page->concurrent_sweeping_state().Value());
    4038             :     page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    4039             :     const Sweeper::FreeSpaceTreatmentMode free_space_mode =
    4040             :         Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    4041      480217 :     if (identity == NEW_SPACE) {
    4042         489 :       RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
    4043             :     } else {
    4044      479728 :       max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
    4045             :     }
    4046             :     DCHECK(page->SweepingDone());
    4047             : 
    4048             :     // After finishing sweeping a page, we clean up its remembered set.
    4049             :     TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
    4050      480217 :     if (typed_slot_set) {
    4051        2228 :       typed_slot_set->FreeToBeFreedChunks();
    4052             :     }
    4053             :     SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
    4054      480200 :     if (slot_set) {
    4055      222524 :       slot_set->FreeToBeFreedBuckets();
    4056             :     }
    4057             :   }
    4058             : 
    4059             :   {
    4060      480270 :     base::LockGuard<base::Mutex> guard(&mutex_);
    4061      480284 :     swept_list_[identity].Add(page);
    4062             :   }
    4063      480284 :   return max_freed;
    4064             : }
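
// A minimal sketch of the "lock, then re-check" claiming ParallelSweepPage
// uses above: several sweeper tasks may reach the same page, the first to
// take the page mutex sweeps it, and latecomers see the done flag and bail
// out. PageSketch and do_sweep are illustrative stand-ins.
#include <mutex>

struct PageSketch {
  std::mutex mutex;
  bool sweeping_done = false;
};

inline int SweepOnceSketch(PageSketch* page, int (*do_sweep)(PageSketch*)) {
  std::lock_guard<std::mutex> guard(page->mutex);
  if (page->sweeping_done) return 0;  // another task already swept this page
  int freed = do_sweep(page);
  page->sweeping_done = true;
  return freed;
}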
    4065             : 
    4066           0 : void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
    4067             :   DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
    4068      480284 :   PrepareToBeSweptPage(space, page);
    4069      480284 :   sweeping_list_[space].push_back(page);
    4070           0 : }
    4071             : 
    4072      480284 : void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
    4073             :                                                          Page* page) {
    4074             :   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
    4075             :   DCHECK_GE(page->area_size(),
    4076             :             static_cast<size_t>(MarkingState::Internal(page).live_bytes()));
    4077             :   size_t to_sweep =
    4078      480284 :       page->area_size() - MarkingState::Internal(page).live_bytes();
    4079      480284 :   if (space != NEW_SPACE)
    4080      479795 :     heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
    4081      480284 : }
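
// A worked example of the accounting above (numbers hypothetical): for a
// page with area_size() == 500 KB of which live_bytes() == 120 KB survived
// marking, to_sweep == 380 KB of dead memory is subtracted from the owning
// space's accounting stats before the page is handed to the sweeper.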
    4082             : 
    4083     1336369 : Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
    4084             :     AllocationSpace space) {
    4085     1336369 :   base::LockGuard<base::Mutex> guard(&mutex_);
    4086             :   Page* page = nullptr;
    4087     2673458 :   if (!sweeping_list_[space].empty()) {
    4088      480284 :     page = sweeping_list_[space].front();
    4089      480284 :     sweeping_list_[space].pop_front();
    4090             :   }
    4091     1336666 :   return page;
    4092             : }
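
// A minimal sketch of the guarded pop GetSweepingPageSafe performs above:
// sweeper tasks pull pages from a shared per-space deque under one mutex,
// and an empty deque yields nullptr, which ends the caller's sweeping loop.
#include <deque>
#include <mutex>

template <typename PageT>
PageT* PopPageSafeSketch(std::mutex* mutex, std::deque<PageT*>* list) {
  std::lock_guard<std::mutex> guard(*mutex);
  if (list->empty()) return nullptr;  // nothing left to sweep
  PageT* page = list->front();
  list->pop_front();
  return page;
}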
    4093             : 
    4094           0 : void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
    4095             :                                                         Page* page) {
    4096           0 :   base::LockGuard<base::Mutex> guard(&mutex_);
    4097           0 :   sweeping_list_[space].push_back(page);
    4098           0 : }
    4099             : 
    4100      160038 : void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
    4101      160038 :   space->ClearStats();
    4102             : 
    4103             :   int will_be_swept = 0;
    4104             :   bool unused_page_present = false;
    4105             : 
    4106             :   // The loop must support deletion, as pages with live bytes == 0 are
    4106             :   // released below.
    4107      812862 :   for (auto it = space->begin(); it != space->end();) {
    4108             :     Page* p = *(it++);
    4109             :     DCHECK(p->SweepingDone());
    4110             : 
    4111      492786 :     if (p->IsEvacuationCandidate()) {
    4112             :       // Will be processed in EvacuateNewSpaceAndCandidates.
    4113             :       DCHECK(evacuation_candidates_.length() > 0);
    4114             :       continue;
    4115             :     }
    4116             : 
    4117      483121 :     if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
    4118             :       // We need to sweep the page to get it into an iterable state again.
    4119             :       // Note that this adds unusable memory to the free list, which is
    4120             :       // later dropped from it again. Since we only use this flag for
    4121             :       // testing, this is fine.
    4122             :       p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    4123             :       Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST,
    4124             :                         Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE
    4125         154 :                                                  : Sweeper::IGNORE_FREE_SPACE);
    4126         154 :       continue;
    4127             :     }
    4128             : 
    4129             :     // One unused page is kept; all further ones are released before sweeping.
    4130      482967 :     if (MarkingState::Internal(p).live_bytes() == 0) {
    4131       17881 :       if (unused_page_present) {
    4132             :         if (FLAG_gc_verbose) {
    4133             :           PrintIsolate(isolate(), "sweeping: released page: %p",
    4134             :                        static_cast<void*>(p));
    4135             :         }
    4136        3892 :         ArrayBufferTracker::FreeAll(p);
    4137        3892 :         space->ReleasePage(p);
    4138        3892 :         continue;
    4139             :       }
    4140             :       unused_page_present = true;
    4141             :     }
    4142             : 
    4143      479075 :     sweeper().AddPage(space->identity(), p);
    4144             :     will_be_swept++;
    4145             :   }
    4146             : 
    4147             :   if (FLAG_gc_verbose) {
    4148             :     PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
    4149             :                  AllocationSpaceName(space->identity()), will_be_swept);
    4150             :   }
    4151      160038 : }
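
// A minimal sketch of the per-page triage StartSweepSpace applies above; the
// enum and parameters are illustrative stand-ins for the flags tested there.
#include <cstddef>

enum class PageFateSketch {
  kLeaveToEvacuation,  // evacuation candidates are handled elsewhere
  kSweepEagerly,       // NEVER_ALLOCATE_ON_PAGE: sweep now for iterability
  kRelease,            // empty page beyond the single one that is kept
  kQueueForSweeping    // the common case: hand the page to the sweeper
};

inline PageFateSketch TriageSketch(bool is_evacuation_candidate,
                                   bool never_allocate,
                                   std::size_t live_bytes,
                                   bool* unused_page_seen) {
  if (is_evacuation_candidate) return PageFateSketch::kLeaveToEvacuation;
  if (never_allocate) return PageFateSketch::kSweepEagerly;
  if (live_bytes == 0) {
    if (*unused_page_seen) return PageFateSketch::kRelease;
    *unused_page_seen = true;  // keep exactly one unused page around
  }
  return PageFateSketch::kQueueForSweeping;
}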
    4152             : 
    4153       53346 : void MarkCompactCollector::StartSweepSpaces() {
    4154      533460 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
    4155             : #ifdef DEBUG
    4156             :   state_ = SWEEP_SPACES;
    4157             : #endif
    4158             : 
    4159             :   {
    4160             :     {
    4161             :       GCTracer::Scope sweep_scope(heap()->tracer(),
    4162       53346 :                                   GCTracer::Scope::MC_SWEEP_OLD);
    4163       53346 :       StartSweepSpace(heap()->old_space());
    4164             :     }
    4165             :     {
    4166             :       GCTracer::Scope sweep_scope(heap()->tracer(),
    4167       53346 :                                   GCTracer::Scope::MC_SWEEP_CODE);
    4168       53346 :       StartSweepSpace(heap()->code_space());
    4169             :     }
    4170             :     {
    4171             :       GCTracer::Scope sweep_scope(heap()->tracer(),
    4172       53346 :                                   GCTracer::Scope::MC_SWEEP_MAP);
    4173       53346 :       StartSweepSpace(heap()->map_space());
    4174             :     }
    4175       53346 :     sweeper().StartSweeping();
    4176             :   }
    4177             : 
    4178             :   // Deallocate unmarked large objects.
    4179      106692 :   heap_->lo_space()->FreeUnmarkedObjects();
    4180       53346 : }
    4181             : 
    4182       58018 : void MarkCompactCollector::Initialize() {
    4183             :   MarkCompactMarkingVisitor::Initialize();
    4184       58018 :   IncrementalMarking::Initialize();
    4185       58018 : }
    4186             : 
    4187    61695087 : void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
    4188             :                                                Code* target) {
    4189             :   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
    4190             :   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
    4191    61699798 :   if (target_page->IsEvacuationCandidate() &&
    4192             :       !ShouldSkipEvacuationSlotRecording(host)) {
    4193             :     // TODO(ulan): remove this check after investigating crbug.com/414964.
    4194        1113 :     CHECK(target->IsCode());
    4195             :     RememberedSet<OLD_TO_OLD>::InsertTyped(
    4196        1113 :         source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
    4197             :   }
    4198    61695087 : }
    4199             : 
    4200             : 
    4201           0 : void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
    4202             :   DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
    4203           0 :   if (is_compacting()) {
    4204             :     Code* host =
    4205             :         isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
    4206           0 :             pc);
    4207           0 :     if (ObjectMarking::IsBlack(host, MarkingState::Internal(host))) {
    4208             :       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    4209             :       // The target is always in old space, we don't have to record the slot in
    4210             :       // the old-to-new remembered set.
    4211             :       DCHECK(!heap()->InNewSpace(target));
    4212           0 :       RecordRelocSlot(host, &rinfo, target);
    4213             :     }
    4214             :   }
    4215           0 : }
    4216             : 
    4217             : }  // namespace internal
    4218             : }  // namespace v8

Generated by: LCOV version 1.10