LCOV - code coverage report
Current view: top level - src/heap - incremental-marking.cc
Test: app.info    Date: 2019-04-19

                  Hit   Total   Coverage
Lines:            427     482     88.6 %
Functions:         49      61     80.3 %

          Line data    Source code
       1             : // Copyright 2012 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/incremental-marking.h"
       6             : 
       7             : #include "src/compilation-cache.h"
       8             : #include "src/conversions.h"
       9             : #include "src/heap/concurrent-marking.h"
      10             : #include "src/heap/embedder-tracing.h"
      11             : #include "src/heap/gc-idle-time-handler.h"
      12             : #include "src/heap/gc-tracer.h"
      13             : #include "src/heap/heap-inl.h"
      14             : #include "src/heap/incremental-marking-inl.h"
      15             : #include "src/heap/mark-compact-inl.h"
      16             : #include "src/heap/object-stats.h"
      17             : #include "src/heap/objects-visiting-inl.h"
      18             : #include "src/heap/objects-visiting.h"
      19             : #include "src/heap/sweeper.h"
      20             : #include "src/objects/data-handler-inl.h"
      21             : #include "src/objects/embedder-data-array-inl.h"
      22             : #include "src/objects/hash-table-inl.h"
      23             : #include "src/objects/slots-inl.h"
      24             : #include "src/tracing/trace-event.h"
      25             : #include "src/transitions-inl.h"
      26             : #include "src/v8.h"
      27             : #include "src/visitors.h"
      28             : #include "src/vm-state-inl.h"
      29             : 
      30             : namespace v8 {
      31             : namespace internal {
      32             : 
      33             : using IncrementalMarkingMarkingVisitor =
      34             :     MarkingVisitor<FixedArrayVisitationMode::kIncremental,
      35             :                    TraceRetainingPathMode::kDisabled,
      36             :                    IncrementalMarking::MarkingState>;
      37             : 
      38       71484 : void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
      39             :                                         size_t size) {
      40       71484 :   Heap* heap = incremental_marking_.heap();
      41             :   VMState<GC> state(heap->isolate());
      42             :   RuntimeCallTimerScope runtime_timer(
      43             :       heap->isolate(),
      44       71484 :       RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
      45       71484 :   incremental_marking_.AdvanceOnAllocation();
      46             :   // AdvanceOnAllocation can start incremental marking.
      47       71485 :   incremental_marking_.EnsureBlackAllocated(addr, size);
      48       71485 : }
      49             : 
      50       62441 : IncrementalMarking::IncrementalMarking(
      51             :     Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist,
      52             :     WeakObjects* weak_objects)
      53             :     : heap_(heap),
      54             :       marking_worklist_(marking_worklist),
      55             :       weak_objects_(weak_objects),
      56             :       initial_old_generation_size_(0),
      57             :       bytes_marked_(0),
      58             :       scheduled_bytes_to_mark_(0),
      59             :       schedule_update_time_ms_(0),
      60             :       bytes_marked_concurrently_(0),
      61             :       is_compacting_(false),
      62             :       should_hurry_(false),
      63             :       was_activated_(false),
      64             :       black_allocation_(false),
      65             :       finalize_marking_completed_(false),
      66             :       request_type_(NONE),
      67             :       new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
      68      124882 :       old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
      69             :   DCHECK_NOT_NULL(marking_worklist_);
      70             :   SetState(STOPPED);
      71       62441 : }
      72             : 
      73             : bool IncrementalMarking::BaseRecordWrite(HeapObject obj, Object value) {
      74   130360144 :   HeapObject value_heap_obj = HeapObject::cast(value);
      75             :   DCHECK(!marking_state()->IsImpossible(value_heap_obj));
      76             :   DCHECK(!marking_state()->IsImpossible(obj));
      77             : #ifdef V8_CONCURRENT_MARKING
      78             :   // The write barrier stub generated with V8_CONCURRENT_MARKING does not
      79             :   // check the color of the source object.
      80             :   const bool need_recording = true;
      81             : #else
      82             :   const bool need_recording = marking_state()->IsBlack(obj);
      83             : #endif
      84             : 
      85   130360144 :   if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
      86    19234722 :     RestartIfNotMarking();
      87             :   }
      88   130360194 :   return is_compacting_ && need_recording;
      89             : }
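
BaseRecordWrite above implements a Dijkstra-style insertion barrier: when a
reference is written into an object during marking, the referent is shaded grey
so the marker revisits it, preserving the invariant that no black object points
to a white one. A minimal standalone sketch of the technique (all names here
are hypothetical, not V8's):

    #include <deque>

    enum class Color { kWhite, kGrey, kBlack };

    struct Obj { Color color = Color::kWhite; };

    std::deque<Obj*> worklist;  // grey objects awaiting a marking visit

    // Shade a white object grey and queue it for the marker.
    bool WhiteToGreyAndPush(Obj* o) {
      if (o->color != Color::kWhite) return false;
      o->color = Color::kGrey;
      worklist.push_back(o);
      return true;
    }

    // Barrier run after storing |value| into a field of |host|. Mirroring the
    // V8_CONCURRENT_MARKING branch above, the host's color is not checked, so
    // the value is shaded unconditionally.
    void RecordWrite(Obj* /*host*/, Obj* value) { WhiteToGreyAndPush(value); }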
      90             : 
      91   130088176 : void IncrementalMarking::RecordWriteSlow(HeapObject obj, HeapObjectSlot slot,
      92             :                                          Object value) {
      93   139246202 :   if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
      94             :     // Object is not going to be rescanned. We need to record the slot.
      95             :     heap_->mark_compact_collector()->RecordSlot(obj, slot,
      96             :                                                 HeapObject::cast(value));
      97             :   }
      98   130088226 : }
      99             : 
     100     5379684 : int IncrementalMarking::RecordWriteFromCode(Address raw_obj,
     101             :                                             Address slot_address,
     102             :                                             Isolate* isolate) {
     103             :   HeapObject obj = HeapObject::cast(Object(raw_obj));
     104             :   MaybeObjectSlot slot(slot_address);
     105             :   isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
     106             :                                                                *slot);
     107             :   // Called by RecordWriteCodeStubAssembler, which doesn't accept a void type.
     108     5379686 :   return 0;
     109             : }
     110             : 
     111      271968 : void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
     112             :                                              HeapObject value) {
     113             :   DCHECK(IsMarking());
     114      271968 :   if (BaseRecordWrite(host, value)) {
     115             :     // Object is not going to be rescanned.  We need to record the slot.
     116        5247 :     heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
     117             :   }
     118      271968 : }
     119             : 
     120   371232651 : bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
     121   371232242 :   if (marking_state()->WhiteToGrey(obj)) {
     122             :     marking_worklist()->Push(obj);
     123    92834174 :     return true;
     124             :   }
     125             :   return false;
     126             : }
     127             : 
     128     3305033 : void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
     129             :     HeapObject obj) {
     130     9915103 :   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
     131    13220140 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
     132             :   marking_state()->WhiteToGrey(obj);
     133     3305036 :   if (marking_state()->GreyToBlack(obj)) {
     134     1977103 :     RevisitObject(obj);
     135             :   }
     136     3305037 : }
     137             : 
     138         161 : void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
     139             :   DCHECK(IsMarking());
     140             :   DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
     141             :   DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
     142             :   DCHECK_NE(from, to);
     143             : 
     144             :   MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);
     145             : 
     146         322 :   if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
     147             :     // Nothing to do if the object is in black area.
     148             :     return;
     149             :   }
     150           6 :   MarkBlackAndVisitObjectDueToLayoutChange(from);
     151             :   DCHECK(marking_state()->IsBlack(from));
     152             :   // Mark the new address as black.
     153          12 :   if (from->address() + kTaggedSize == to->address()) {
     154             :     // The old and the new markbits overlap. The |to| object has the
     155             :     // grey color. To make it black, we need to set the second bit.
     156             :     DCHECK(new_mark_bit.Get<kAtomicity>());
     157             :     new_mark_bit.Next().Set<kAtomicity>();
     158             :   } else {
     159             :     bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
     160             :     DCHECK(success);
     161             :     USE(success);
     162             :   }
     163             :   DCHECK(marking_state()->IsBlack(to));
     164             : }
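
The fix-up above leans on the two-bit mark encoding used by this file: white
leaves both bits clear, grey sets the first bit, black sets both. When |to|
starts exactly one tagged word after |from|, the second bit of |from|'s black
pattern doubles as the first bit of |to|, so |to| already reads as grey and
only its second bit needs to be set. A toy bitmap model of that encoding
(sizes and names are assumptions, not V8's layout):

    #include <bitset>
    #include <cstddef>

    std::bitset<256> mark_bits;  // one bit per tagged word on a toy "page"

    // Black = both bits starting at the object's word index are set.
    void MarkBlackAt(std::size_t word) {
      mark_bits.set(word);      // first bit: grey
      mark_bits.set(word + 1);  // second bit: grey -> black
    }

    bool IsGreyAt(std::size_t word) {
      return mark_bits.test(word) && !mark_bits.test(word + 1);
    }

    bool IsBlackAt(std::size_t word) {
      return mark_bits.test(word) && mark_bits.test(word + 1);
    }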
     165             : 
     166       47414 : class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
     167             :  public:
     168             :   explicit IncrementalMarkingRootMarkingVisitor(
     169             :       IncrementalMarking* incremental_marking)
     170       47414 :       : heap_(incremental_marking->heap()) {}
     171             : 
     172   147503888 :   void VisitRootPointer(Root root, const char* description,
     173             :                         FullObjectSlot p) override {
     174             :     MarkObjectByPointer(p);
     175   147503916 :   }
     176             : 
     177     1616365 :   void VisitRootPointers(Root root, const char* description,
     178             :                          FullObjectSlot start, FullObjectSlot end) override {
     179    36541709 :     for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
     180     1616365 :   }
     181             : 
     182             :  private:
     183             :   void MarkObjectByPointer(FullObjectSlot p) {
     184             :     Object obj = *p;
     185   180812867 :     if (!obj->IsHeapObject()) return;
     186             : 
     187   347752158 :     heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
     188             :   }
     189             : 
     190             :   Heap* heap_;
     191             : };
     192             : 
     193           0 : void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     194             :     PagedSpace* space) {
     195      235860 :   for (Page* p : *space) {
     196      169056 :     p->SetOldGenerationPageFlags(false);
     197             :   }
     198           0 : }
     199             : 
     200             : 
     201           0 : void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     202             :     NewSpace* space) {
     203      179378 :   for (Page* p : *space) {
     204      157110 :     p->SetYoungGenerationPageFlags(false);
     205             :   }
     206           0 : }
     207             : 
     208             : 
     209       22268 : void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
     210       22268 :   DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
     211       22268 :   DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
     212       22268 :   DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
     213       22268 :   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
     214             : 
     215       22268 :   for (LargePage* p : *heap_->new_lo_space()) {
     216        1101 :     p->SetYoungGenerationPageFlags(false);
     217             :     DCHECK(p->IsLargePage());
     218             :   }
     219             : 
     220       22268 :   for (LargePage* p : *heap_->lo_space()) {
     221        6389 :     p->SetOldGenerationPageFlags(false);
     222             :   }
     223             : 
     224       22268 :   for (LargePage* p : *heap_->code_lo_space()) {
     225        9430 :     p->SetOldGenerationPageFlags(false);
     226             :   }
     227       22268 : }
     228             : 
     229             : 
     230           0 : void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
     231      244212 :   for (Page* p : *space) {
     232      162243 :     p->SetOldGenerationPageFlags(true);
     233             :   }
     234           0 : }
     235             : 
     236             : 
     237           0 : void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
     238      201105 :   for (Page* p : *space) {
     239      173782 :     p->SetYoungGenerationPageFlags(true);
     240             :   }
     241           0 : }
     242             : 
     243             : 
     244       27323 : void IncrementalMarking::ActivateIncrementalWriteBarrier() {
     245       27323 :   ActivateIncrementalWriteBarrier(heap_->old_space());
     246       27323 :   ActivateIncrementalWriteBarrier(heap_->map_space());
     247       27323 :   ActivateIncrementalWriteBarrier(heap_->code_space());
     248       27323 :   ActivateIncrementalWriteBarrier(heap_->new_space());
     249             : 
     250       27323 :   for (LargePage* p : *heap_->new_lo_space()) {
     251         713 :     p->SetYoungGenerationPageFlags(true);
     252             :     DCHECK(p->IsLargePage());
     253             :   }
     254             : 
     255       27323 :   for (LargePage* p : *heap_->lo_space()) {
     256        5556 :     p->SetOldGenerationPageFlags(true);
     257             :   }
     258             : 
     259       27323 :   for (LargePage* p : *heap_->code_lo_space()) {
     260        8746 :     p->SetOldGenerationPageFlags(true);
     261             :   }
     262       27323 : }
     263             : 
     264             : 
     265       68841 : bool IncrementalMarking::WasActivated() { return was_activated_; }
     266             : 
     267             : 
     268     1377374 : bool IncrementalMarking::CanBeActivated() {
     269             :   // Only start incremental marking in a safe state: 1) when incremental
     270             :   // marking is turned on, 2) when we are currently not in a GC, and
     271             :   // 3) when we are currently not serializing or deserializing the heap.
     272     1355994 :   return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
     273     2233955 :          heap_->deserialization_complete() &&
     274     1377374 :          !heap_->isolate()->serializer_enabled();
     275             : }
     276             : 
     277             : 
     278       22268 : void IncrementalMarking::Deactivate() {
     279       22268 :   DeactivateIncrementalWriteBarrier();
     280       22268 : }
     281             : 
     282       30531 : void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
     283       30531 :   if (FLAG_trace_incremental_marking) {
     284             :     int old_generation_size_mb =
     285           5 :         static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
     286             :     int old_generation_limit_mb =
     287           5 :         static_cast<int>(heap()->old_generation_allocation_limit() / MB);
     288          10 :     heap()->isolate()->PrintWithTimestamp(
     289             :         "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
     290             :         "slack %dMB\n",
     291             :         Heap::GarbageCollectionReasonToString(gc_reason),
     292             :         old_generation_size_mb, old_generation_limit_mb,
     293           5 :         Max(0, old_generation_limit_mb - old_generation_size_mb));
     294             :   }
     295             :   DCHECK(FLAG_incremental_marking);
     296             :   DCHECK(state_ == STOPPED);
     297             :   DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
     298             :   DCHECK(!heap_->isolate()->serializer_enabled());
     299             : 
     300       30531 :   Counters* counters = heap_->isolate()->counters();
     301             : 
     302             :   counters->incremental_marking_reason()->AddSample(
     303       30531 :       static_cast<int>(gc_reason));
     304             :   HistogramTimerScope incremental_marking_scope(
     305             :       counters->gc_incremental_marking_start());
     306       91593 :   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
     307      122124 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
     308       61062 :   heap_->tracer()->NotifyIncrementalMarkingStart();
     309             : 
     310       30531 :   start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
     311       30531 :   initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
     312       61062 :   old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
     313       30531 :   bytes_marked_ = 0;
     314       30531 :   scheduled_bytes_to_mark_ = 0;
     315       30531 :   schedule_update_time_ms_ = start_time_ms_;
     316       30531 :   bytes_marked_concurrently_ = 0;
     317       30531 :   should_hurry_ = false;
     318       30531 :   was_activated_ = true;
     319             : 
     320       61062 :   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
     321       13369 :     StartMarking();
     322             :   } else {
     323       17162 :     if (FLAG_trace_incremental_marking) {
     324             :       heap()->isolate()->PrintWithTimestamp(
     325           0 :           "[IncrementalMarking] Start sweeping.\n");
     326             :     }
     327             :     SetState(SWEEPING);
     328             :   }
     329             : 
     330       30531 :   heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
     331       30531 :                                            &new_generation_observer_);
     332       61062 :   incremental_marking_job()->Start(heap_);
     333       30531 : }
     334             : 
     335             : 
     336       27323 : void IncrementalMarking::StartMarking() {
     337       54646 :   if (heap_->isolate()->serializer_enabled()) {
     338             :     // Black allocation currently starts when we start incremental marking,
     339             :     // but we cannot enable black allocation while deserializing. Hence, we
     340             :     // have to delay the start of incremental marking in that case.
     341           0 :     if (FLAG_trace_incremental_marking) {
     342             :       heap()->isolate()->PrintWithTimestamp(
     343           0 :           "[IncrementalMarking] Start delayed - serializer\n");
     344             :     }
     345           0 :     return;
     346             :   }
     347       27323 :   if (FLAG_trace_incremental_marking) {
     348             :     heap()->isolate()->PrintWithTimestamp(
     349           5 :         "[IncrementalMarking] Start marking\n");
     350             :   }
     351             : 
     352             :   is_compacting_ =
     353       54646 :       !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();
     354             : 
     355             :   SetState(MARKING);
     356             : 
     357       27323 :   ActivateIncrementalWriteBarrier();
     358             : 
     359             : // Marking bits are cleared by the sweeper.
     360             : #ifdef VERIFY_HEAP
     361             :   if (FLAG_verify_heap) {
     362             :     heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
     363             :   }
     364             : #endif
     365             : 
     366       54646 :   heap_->isolate()->compilation_cache()->MarkCompactPrologue();
     367             : 
     368       27323 :   StartBlackAllocation();
     369             : 
     370             :   // Mark strong roots grey.
     371             :   IncrementalMarkingRootMarkingVisitor visitor(this);
     372       27323 :   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
     373             : 
     374       27323 :   if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
     375       54098 :     heap_->concurrent_marking()->ScheduleTasks();
     376             :   }
     377             : 
     378             :   // Ready to start incremental marking.
     379       27323 :   if (FLAG_trace_incremental_marking) {
     380           5 :     heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
     381             :   }
     382             : 
     383             :   {
     384             :     // TracePrologue may call back into V8 in corner cases, requiring that
     385             :     // marking (including write barriers) is fully set up.
     386      109292 :     TRACE_GC(heap()->tracer(),
     387             :              GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
     388       54646 :     heap_->local_embedder_heap_tracer()->TracePrologue();
     389             :   }
     390             : }
     391             : 
     392       28430 : void IncrementalMarking::StartBlackAllocation() {
     393             :   DCHECK(!black_allocation_);
     394             :   DCHECK(IsMarking());
     395       28430 :   black_allocation_ = true;
     396       28430 :   heap()->old_space()->MarkLinearAllocationAreaBlack();
     397       28430 :   heap()->map_space()->MarkLinearAllocationAreaBlack();
     398       28430 :   heap()->code_space()->MarkLinearAllocationAreaBlack();
     399       28430 :   if (FLAG_trace_incremental_marking) {
     400             :     heap()->isolate()->PrintWithTimestamp(
     401           5 :         "[IncrementalMarking] Black allocation started\n");
     402             :   }
     403       28430 : }
     404             : 
     405        1107 : void IncrementalMarking::PauseBlackAllocation() {
     406             :   DCHECK(IsMarking());
     407        1107 :   heap()->old_space()->UnmarkLinearAllocationArea();
     408        1107 :   heap()->map_space()->UnmarkLinearAllocationArea();
     409        1107 :   heap()->code_space()->UnmarkLinearAllocationArea();
     410        1107 :   if (FLAG_trace_incremental_marking) {
     411             :     heap()->isolate()->PrintWithTimestamp(
     412           0 :         "[IncrementalMarking] Black allocation paused\n");
     413             :   }
     414        1107 :   black_allocation_ = false;
     415        1107 : }
     416             : 
     417       25209 : void IncrementalMarking::FinishBlackAllocation() {
     418       25209 :   if (black_allocation_) {
     419       22268 :     black_allocation_ = false;
     420       22268 :     if (FLAG_trace_incremental_marking) {
     421             :       heap()->isolate()->PrintWithTimestamp(
     422           5 :           "[IncrementalMarking] Black allocation finished\n");
     423             :     }
     424             :   }
     425       25209 : }
     426             : 
     427       71485 : void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
     428       71485 :   if (black_allocation() && allocated != kNullAddress) {
     429             :     HeapObject object = HeapObject::FromAddress(allocated);
     430      135681 :     if (marking_state()->IsWhite(object) && !Heap::InYoungGeneration(object)) {
     431         702 :       if (heap_->IsLargeObject(object)) {
     432             :         marking_state()->WhiteToBlack(object);
     433             :       } else {
     434         544 :         Page::FromAddress(allocated)->CreateBlackArea(allocated,
     435         544 :                                                       allocated + size);
     436             :       }
     437             :     }
     438             :   }
     439       71485 : }
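
Black allocation means objects allocated in old space while marking is active
start out black, so the marker never needs to visit them; EnsureBlackAllocated
handles the corner case where an allocation-observer step fires before the new
object's area has been blackened. A sketch in the spirit of the
Page::CreateBlackArea call above (the bitmap layout is assumed, not V8's):

    #include <cstdint>
    #include <vector>

    using Address = std::uintptr_t;
    constexpr Address kTaggedSize = 8;  // assumption: 64-bit tagged words

    // Set the mark bit of every word in [start, end) so any object allocated
    // in that range reads as black. Index 0 corresponds to |page_start|.
    void CreateBlackArea(std::vector<bool>& mark_bitmap, Address page_start,
                         Address start, Address end) {
      for (Address a = start; a < end; a += kTaggedSize) {
        mark_bitmap[(a - page_start) / kTaggedSize] = true;
      }
    }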
     440             : 
     441           0 : void IncrementalMarking::MarkRoots() {
     442             :   DCHECK(!finalize_marking_completed_);
     443             :   DCHECK(IsMarking());
     444             : 
     445             :   IncrementalMarkingRootMarkingVisitor visitor(this);
     446       20091 :   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
     447           0 : }
     448             : 
     449       16838 : bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
     450       16838 :   if (age == 0) {
     451             :     // The map has aged. Do not retain this map.
     452             :     return false;
     453             :   }
     454       15679 :   Object constructor = map->GetConstructor();
     455       31358 :   if (!constructor->IsHeapObject() ||
     456             :       marking_state()->IsWhite(HeapObject::cast(constructor))) {
     457             :     // The constructor is dead, no new objects with this map can
     458             :     // be created. Do not retain this map.
     459             :     return false;
     460             :   }
     461        4257 :   return true;
     462             : }
     463             : 
     464             : 
     465       20091 : void IncrementalMarking::RetainMaps() {
     466             :   // Do not retain dead maps if the flag disables it, or if
     467             :   // - there is memory pressure (reduce_memory_footprint_), or
     468             :   // - the GC was requested by tests or dev-tools (abort_incremental_marking_).
     469       40166 :   bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
     470       20075 :                                    FLAG_retain_maps_for_n_gc == 0;
     471       20091 :   WeakArrayList retained_maps = heap()->retained_maps();
     472             :   int length = retained_maps->length();
     473             :   // The number_of_disposed_maps separates maps in the retained_maps
     474             :   // array that were created before and after context disposal.
     475             :   // We do not age and retain disposed maps to avoid memory leaks.
     476       20091 :   int number_of_disposed_maps = heap()->number_of_disposed_maps_;
     477      111549 :   for (int i = 0; i < length; i += 2) {
     478             :     MaybeObject value = retained_maps->Get(i);
     479             :     HeapObject map_heap_object;
     480       45729 :     if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
     481             :       continue;
     482             :     }
     483             :     int age = retained_maps->Get(i + 1).ToSmi().value();
     484             :     int new_age;
     485             :     Map map = Map::cast(map_heap_object);
     486       54869 :     if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
     487             :         marking_state()->IsWhite(map)) {
     488       16838 :       if (ShouldRetainMap(map, age)) {
     489        4257 :         WhiteToGreyAndPush(map);
     490             :       }
     491             :       Object prototype = map->prototype();
     492       48196 :       if (age > 0 && prototype->IsHeapObject() &&
     493             :           marking_state()->IsWhite(HeapObject::cast(prototype))) {
     494             :         // The prototype is not marked, age the map.
     495       15305 :         new_age = age - 1;
     496             :       } else {
     497             :         // The prototype and the constructor are marked, this map keeps only
     498             :         // transition tree alive, not JSObjects. Do not age the map.
     499             :         new_age = age;
     500             :       }
     501             :     } else {
     502       13679 :       new_age = FLAG_retain_maps_for_n_gc;
     503             :     }
     504             :     // Compact the array and update the age.
     505       30517 :     if (new_age != age) {
     506       15370 :       retained_maps->Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
     507             :     }
     508             :   }
     509       20091 : }
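
Condensed, the aging protocol above is: a marked (or exempt) map has its age
reset to FLAG_retain_maps_for_n_gc; an unmarked map whose prototype is also
unmarked loses one unit of age per finalization; and once the age reaches 0,
ShouldRetainMap lets the map die. A hypothetical model of one aging step:

    // Returns the map's new age. |retain_for_n_gc| stands in for
    // FLAG_retain_maps_for_n_gc; the default value 2 below is illustrative.
    int AgeMap(int age, bool map_is_unmarked, bool prototype_is_unmarked,
               int retain_for_n_gc = 2) {
      if (!map_is_unmarked) return retain_for_n_gc;          // reset the age
      if (age > 0 && prototype_is_unmarked) return age - 1;  // age the map
      return age;  // only the transition tree is alive; do not age
    }

With retain_for_n_gc == 2, an unreferenced map survives at most two
finalizations before it is dropped.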
     510             : 
     511       20091 : void IncrementalMarking::FinalizeIncrementally() {
     512       80364 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
     513             :   DCHECK(!finalize_marking_completed_);
     514             :   DCHECK(IsMarking());
     515             : 
     516       20091 :   double start = heap_->MonotonicallyIncreasingTimeInMs();
     517             : 
     518             :   // After finishing incremental marking, we try to discover all unmarked
     519             :   // objects to reduce the marking load in the final pause.
     520             :   // 1) We scan and mark the roots again to find all changes to the root set.
     521             :   // 2) Age and retain maps embedded in optimized code.
     522             :   MarkRoots();
     523             : 
     524             :   // Map retaining is needed for performance, not correctness,
     525             :   // so we can do it only once at the beginning of the finalization.
     526       20091 :   RetainMaps();
     527             : 
     528       20091 :   finalize_marking_completed_ = true;
     529             : 
     530       20091 :   if (FLAG_trace_incremental_marking) {
     531           5 :     double end = heap_->MonotonicallyIncreasingTimeInMs();
     532           5 :     double delta = end - start;
     533             :     heap()->isolate()->PrintWithTimestamp(
     534           5 :         "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
     535             :   }
     536       20091 : }
     537             : 
     538       26098 : void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
     539       26098 :   if (!IsMarking()) return;
     540             : 
     541        1107 :   Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
     542             : 
     543             : #ifdef ENABLE_MINOR_MC
     544             :   MinorMarkCompactCollector::MarkingState* minor_marking_state =
     545             :       heap()->minor_mark_compact_collector()->marking_state();
     546             : #else
     547             :   void* minor_marking_state = nullptr;
     548             : #endif  // ENABLE_MINOR_MC
     549             : 
     550             :   marking_worklist()->Update([
     551             : #ifdef DEBUG
     552             :                                  // |this| is referenced inside a DCHECK.
     553             :                                  this,
     554             : #endif
     555             :                                  filler_map, minor_marking_state](
     556      797410 :                                  HeapObject obj, HeapObject* out) -> bool {
     557             :     DCHECK(obj->IsHeapObject());
     558             :     // Only pointers to from space have to be updated.
     559      797410 :     if (Heap::InFromPage(obj)) {
     560             :       MapWord map_word = obj->map_word();
     561      243768 :       if (!map_word.IsForwardingAddress()) {
     562             :         // There may be objects on the marking deque that do not exist anymore,
     563             :         // e.g. left trimmed objects or objects from the root set (frames).
     564             :         // If these objects are dead at scavenging time, their marking deque
     565             :         // entries will not point to forwarding addresses. Hence, we can discard
     566             :         // them.
     567             :         return false;
     568             :       }
     569             :       HeapObject dest = map_word.ToForwardingAddress();
     570             :       DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
     571      206029 :       *out = dest;
     572      206029 :       return true;
     573      553642 :     } else if (Heap::InToPage(obj)) {
     574             :       // The object may be on a large page or on a page that was moved in new
     575             :       // space.
     576             :       DCHECK(Heap::IsLargeObject(obj) ||
     577             :              Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
     578             : #ifdef ENABLE_MINOR_MC
     579           0 :       if (minor_marking_state->IsWhite(obj)) {
     580             :         return false;
     581             :       }
     582             : #endif  // ENABLE_MINOR_MC
     583             :       // Either a large object or an object marked by the minor mark-compactor.
     584           0 :       *out = obj;
     585           0 :       return true;
     586             :     } else {
     587             :       // The object may be on a page that was moved from new to old space. Only
     588             :       // applicable during minor MC garbage collections.
     589      553642 :       if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
     590             : #ifdef ENABLE_MINOR_MC
     591           0 :         if (minor_marking_state->IsWhite(obj)) {
     592             :           return false;
     593             :         }
     594             : #endif  // ENABLE_MINOR_MC
     595           0 :         *out = obj;
     596           0 :         return true;
     597             :       }
     598             :       DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
     599             :       // Skip one-word filler objects that appear on the
     600             :       // stack when we perform an in-place array shift.
     601      553642 :       if (obj->map() != filler_map) {
     602      553642 :         *out = obj;
     603      553642 :         return true;
     604             :       }
     605             :       return false;
     606             :     }
     607        1107 :   });
     608             : 
     609        1107 :   UpdateWeakReferencesAfterScavenge();
     610             : }
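
The Update callback above is in effect a filter-map over the worklist: an
entry pointing into from-space is dropped if the object died or replaced by
its forwarding address if it survived, while to-space and old-space entries
are kept unless the minor collector proved them dead or they are fillers. A
standalone model of the from-space case (types are assumptions):

    #include <optional>

    struct ScavengeInfo {
      bool in_from_space;
      void* forwarding_address;  // nullptr if the object did not survive
    };

    // Returns the entry to keep in the worklist, or nullopt to discard it.
    std::optional<void*> UpdateEntry(void* obj, const ScavengeInfo& info) {
      if (!info.in_from_space) return obj;  // untouched by the scavenge
      if (info.forwarding_address == nullptr) return std::nullopt;  // dead
      return info.forwarding_address;  // follow the forwarding pointer
    }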
     611             : 
     612        1107 : void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
     613        1107 :   weak_objects_->weak_references.Update(
     614             :       [](std::pair<HeapObject, HeapObjectSlot> slot_in,
     615             :          std::pair<HeapObject, HeapObjectSlot>* slot_out) -> bool {
     616      240847 :         HeapObject heap_obj = slot_in.first;
     617      240847 :         HeapObject forwarded = ForwardingAddress(heap_obj);
     618             : 
     619      240847 :         if (!forwarded.is_null()) {
     620             :           ptrdiff_t distance_to_slot =
     621      235593 :               slot_in.second.address() - slot_in.first.ptr();
     622      235593 :           Address new_slot = forwarded.ptr() + distance_to_slot;
     623      235593 :           slot_out->first = forwarded;
     624      235593 :           slot_out->second = HeapObjectSlot(new_slot);
     625             :           return true;
     626             :         }
     627             : 
     628             :         return false;
     629        1107 :       });
     630        1107 :   weak_objects_->weak_objects_in_code.Update(
     631             :       [](std::pair<HeapObject, Code> slot_in,
     632             :          std::pair<HeapObject, Code>* slot_out) -> bool {
     633        1436 :         HeapObject heap_obj = slot_in.first;
     634        1436 :         HeapObject forwarded = ForwardingAddress(heap_obj);
     635             : 
     636        1436 :         if (!forwarded.is_null()) {
     637        1436 :           slot_out->first = forwarded;
     638        1436 :           slot_out->second = slot_in.second;
     639             :           return true;
     640             :         }
     641             : 
     642             :         return false;
     643        1107 :       });
     644        1107 :   weak_objects_->ephemeron_hash_tables.Update(
     645             :       [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
     646       14560 :         EphemeronHashTable forwarded = ForwardingAddress(slot_in);
     647             : 
     648       14560 :         if (!forwarded.is_null()) {
     649       14560 :           *slot_out = forwarded;
     650             :           return true;
     651             :         }
     652             : 
     653             :         return false;
     654        1107 :       });
     655             : 
     656           0 :   auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
     657           0 :     HeapObject key = slot_in.key;
     658           0 :     HeapObject value = slot_in.value;
     659           0 :     HeapObject forwarded_key = ForwardingAddress(key);
     660           0 :     HeapObject forwarded_value = ForwardingAddress(value);
     661             : 
     662           0 :     if (!forwarded_key.is_null() && !forwarded_value.is_null()) {
     663           0 :       *slot_out = Ephemeron{forwarded_key, forwarded_value};
     664             :       return true;
     665             :     }
     666             : 
     667             :     return false;
     668             :   };
     669             : 
     670        1107 :   weak_objects_->current_ephemerons.Update(ephemeron_updater);
     671        1107 :   weak_objects_->next_ephemerons.Update(ephemeron_updater);
     672        1107 :   weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
     673             : 
     674        1107 :   weak_objects_->flushed_js_functions.Update(
     675             :       [](JSFunction slot_in, JSFunction* slot_out) -> bool {
     676          78 :         JSFunction forwarded = ForwardingAddress(slot_in);
     677             : 
     678          78 :         if (!forwarded.is_null()) {
     679          78 :           *slot_out = forwarded;
     680             :           return true;
     681             :         }
     682             : 
     683             :         return false;
     684        1107 :       });
     685             : #ifdef DEBUG
     686             :   weak_objects_->bytecode_flushing_candidates.Iterate(
     687             :       [](SharedFunctionInfo candidate) {
     688             :         DCHECK(!Heap::InYoungGeneration(candidate));
     689             :       });
     690             : #endif
     691        1107 : }
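
The weak_references updater keeps a recorded interior slot valid after its
holder moves by preserving the slot's offset within the holder: a holder at
0x1000 with a recorded slot at 0x1018 (offset 0x18) that is evacuated to
0x2000 yields a new slot address of 0x2018. As arithmetic:

    #include <cstdint>

    std::uintptr_t RelocateSlot(std::uintptr_t old_object,
                                std::uintptr_t old_slot,
                                std::uintptr_t forwarded_object) {
      std::uintptr_t distance_to_slot = old_slot - old_object;  // 0x18 above
      return forwarded_object + distance_to_slot;               // 0x2018
    }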
     692             : 
     693       26098 : void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
     694             :     size_t dead_bytes_in_new_space) {
     695       26098 :   if (!IsMarking()) return;
     696        2214 :   bytes_marked_ -= Min(bytes_marked_, dead_bytes_in_new_space);
     697             : }
     698             : 
     699             : bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject obj) {
     700             :   if (!obj->IsFixedArray()) return false;
     701             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
     702             :   return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
     703             : }
     704             : 
     705             : int IncrementalMarking::VisitObject(Map map, HeapObject obj) {
     706             :   DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
     707             :   if (!marking_state()->GreyToBlack(obj)) {
     708             :     // The object can already be black in these cases:
     709             :     // 1. The object is a fixed array with the progress bar.
     710             :     // 2. The object is a JSObject that was colored black before
     711             :     //    unsafe layout change.
     712             :     // 3. The object is a string that was colored black before
     713             :     //    unsafe layout change.
     714             :     // 4. The object is materialized by the deoptimizer.
     715             :     // 5. The object is a descriptor array marked black by
     716             :     //    the descriptor array marking barrier.
     717             :     DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
     718             :            obj->IsFixedArray() || obj->IsContext() || obj->IsJSObject() ||
     719             :            obj->IsString() || obj->IsDescriptorArray());
     720             :   }
     721             :   DCHECK(marking_state()->IsBlack(obj));
     722    44085796 :   WhiteToGreyAndPush(map);
     723             :   IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
     724    44086011 :                                            marking_state());
     725             :   return visitor.Visit(map, obj);
     726             : }
     727             : 
     728    19070371 : void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
     729    38140728 :   if (IsMarking() && marking_state()->IsBlack(obj)) {
     730    19070174 :     RevisitObject(obj);
     731             :   }
     732    19070308 : }
     733             : 
     734    22910823 : void IncrementalMarking::RevisitObject(HeapObject obj) {
     735             :   DCHECK(IsMarking());
     736             :   DCHECK(marking_state()->IsBlack(obj));
     737             :   Page* page = Page::FromHeapObject(obj);
     738    45821645 :   if (page->owner()->identity() == LO_SPACE ||
     739             :       page->owner()->identity() == NEW_LO_SPACE) {
     740             :     page->ResetProgressBar();
     741             :   }
     742             :   Map map = obj->map();
     743    22910823 :   WhiteToGreyAndPush(map);
     744             :   IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
     745             :                                            marking_state());
     746             :   visitor.Visit(map, obj);
     747    22910690 : }
     748             : 
     749     3650430 : void IncrementalMarking::VisitDescriptors(HeapObject host,
     750             :                                           DescriptorArray descriptors,
     751             :                                           int number_of_own_descriptors) {
     752             :   IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
     753             :                                            marking_state());
     754             :   // This is necessary because the Scavenger records slots only for the
     755             :   // promoted black objects and the marking visitor of DescriptorArray skips
     756             :   // the descriptors marked by the visitor.VisitDescriptors() below.
     757             :   visitor.MarkDescriptorArrayBlack(host, descriptors);
     758             :   visitor.VisitDescriptors(descriptors, number_of_own_descriptors);
     759     3650427 : }
     760             : 
     761             : intptr_t IncrementalMarking::ProcessMarkingWorklist(
     762             :     intptr_t bytes_to_process, ForceCompletionAction completion) {
     763             :   intptr_t bytes_processed = 0;
     764    42963975 :   while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
     765    45152310 :     HeapObject obj = marking_worklist()->Pop();
     766    45152344 :     if (obj.is_null()) break;
     767             :     // Left trimming may result in grey or black filler objects on the marking
     768             :     // worklist. Ignore these objects.
     769    44085637 :     if (obj->IsFiller()) {
     770             :       // Due to copying mark bits and the fact that grey and black have their
     771             :       // first bit set, one-word fillers are always black.
     772             :       DCHECK_IMPLIES(
     773             :           obj->map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
     774             :           marking_state()->IsBlack(obj));
     775             :       // Other fillers may be black or grey depending on the color of the object
     776             :       // that was trimmed.
     777             :       DCHECK_IMPLIES(
     778             :           obj->map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
     779             :           marking_state()->IsBlackOrGrey(obj));
     780             :       continue;
     781             :     }
     782    41865938 :     bytes_processed += VisitObject(obj->map(), obj);
     783             :   }
     784             :   return bytes_processed;
     785             : }
     786             : 
     787     1358329 : StepResult IncrementalMarking::EmbedderStep(double duration_ms) {
     788     1358329 :   if (!ShouldDoEmbedderStep()) return StepResult::kNoImmediateWork;
     789             : 
     790             :   constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
     791             : 
     792           0 :   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
     793           0 :   double deadline = heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
     794             :   bool empty_worklist;
     795           0 :   do {
     796             :     {
     797             :       LocalEmbedderHeapTracer::ProcessingScope scope(
     798           0 :           heap_->local_embedder_heap_tracer());
     799           0 :       HeapObject object;
     800             :       size_t cnt = 0;
     801             :       empty_worklist = true;
     802           0 :       while (marking_worklist()->embedder()->Pop(0, &object)) {
     803           0 :         scope.TracePossibleWrapper(JSObject::cast(object));
     804           0 :         if (++cnt == kObjectsToProcessBeforeInterrupt) {
     805             :           cnt = 0;
     806             :           empty_worklist = false;
     807             :           break;
     808             :         }
     809             :       }
     810             :     }
     811           0 :     heap_->local_embedder_heap_tracer()->Trace(deadline);
     812           0 :   } while (!empty_worklist &&
     813           0 :            (heap_->MonotonicallyIncreasingTimeInMs() < deadline));
     814           0 :   heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist);
     815             :   return empty_worklist ? StepResult::kNoImmediateWork
     816           0 :                         : StepResult::kMoreWorkRemaining;
     817             : }
     818             : 
     819       22268 : void IncrementalMarking::Hurry() {
     820             :   // A scavenge may have pushed new objects on the marking deque (due to black
     821             :   // allocation) even in COMPLETE state. This may happen if scavenges are
     822             :   // forced, e.g. in tests. It should not happen when COMPLETE was set because
     823             :   // incremental marking finished and a regular GC was triggered after that,
     824             :   // since should_hurry_ will force a full GC.
     825       22268 :   if (!marking_worklist()->IsEmpty()) {
     826             :     double start = 0.0;
     827        7647 :     if (FLAG_trace_incremental_marking) {
     828           1 :       start = heap_->MonotonicallyIncreasingTimeInMs();
     829           1 :       if (FLAG_trace_incremental_marking) {
     830           1 :         heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
     831             :       }
     832             :     }
     833             :     // TODO(gc) Hurry can mark objects it encounters black because the mutator
     834             :     // is stopped.
     835             :     ProcessMarkingWorklist(0, FORCE_COMPLETION);
     836             :     SetState(COMPLETE);
     837        7647 :     if (FLAG_trace_incremental_marking) {
     838           1 :       double end = heap_->MonotonicallyIncreasingTimeInMs();
     839           1 :       double delta = end - start;
     840           1 :       if (FLAG_trace_incremental_marking) {
     841           1 :         heap()->isolate()->PrintWithTimestamp(
     842             :             "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
     843           1 :             static_cast<int>(delta));
     844             :       }
     845             :     }
     846             :   }
     847       22268 : }
     848             : 
     849             : 
     850       25233 : void IncrementalMarking::Stop() {
     851       25257 :   if (IsStopped()) return;
     852       25209 :   if (FLAG_trace_incremental_marking) {
     853             :     int old_generation_size_mb =
     854           5 :         static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
     855             :     int old_generation_limit_mb =
     856           5 :         static_cast<int>(heap()->old_generation_allocation_limit() / MB);
     857           5 :     heap()->isolate()->PrintWithTimestamp(
     858             :         "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
     859             :         "overshoot %dMB\n",
     860             :         old_generation_size_mb, old_generation_limit_mb,
     861           5 :         Max(0, old_generation_size_mb - old_generation_limit_mb));
     862             :   }
     863             : 
     864       50418 :   SpaceIterator it(heap_);
     865      226881 :   while (it.has_next()) {
     866      201672 :     Space* space = it.next();
     867      201672 :     if (space == heap_->new_space()) {
     868       25209 :       space->RemoveAllocationObserver(&new_generation_observer_);
     869             :     } else {
     870      176463 :       space->RemoveAllocationObserver(&old_generation_observer_);
     871             :     }
     872             :   }
     873             : 
     874             :   IncrementalMarking::set_should_hurry(false);
     875       25209 :   heap_->isolate()->stack_guard()->ClearGC();
     876             :   SetState(STOPPED);
     877       25209 :   is_compacting_ = false;
     878       25209 :   FinishBlackAllocation();
     879             : }
     880             : 
     881             : 
     882       22268 : void IncrementalMarking::Finalize() {
     883       22268 :   Hurry();
     884       22268 :   Stop();
     885       22268 : }
     886             : 
     887             : 
     888     1036905 : void IncrementalMarking::FinalizeMarking(CompletionAction action) {
     889             :   DCHECK(!finalize_marking_completed_);
     890     1036905 :   if (FLAG_trace_incremental_marking) {
     891             :     heap()->isolate()->PrintWithTimestamp(
     892             :         "[IncrementalMarking] requesting finalization of incremental "
     893           5 :         "marking.\n");
     894             :   }
     895     1036905 :   request_type_ = FINALIZATION;
     896     1036905 :   if (action == GC_VIA_STACK_GUARD) {
     897     1028483 :     heap_->isolate()->stack_guard()->RequestGC();
     898             :   }
     899     1036905 : }
     900             : 
     901             : 
     902       24109 : void IncrementalMarking::MarkingComplete(CompletionAction action) {
     903             :   SetState(COMPLETE);
     904             :   // We will set the stack guard to request a GC now.  This will mean the rest
     905             :   // of the GC gets performed as soon as possible (we can't do a GC here in a
     906             :   // record-write context).  If a few things get allocated between now and then,
     907             :   // that shouldn't make us do a scavenge and keep being incremental, so we set
     908             :   // the should-hurry flag to indicate that there can't be much work left to do.
     909             :   set_should_hurry(true);
     910       24109 :   if (FLAG_trace_incremental_marking) {
     911             :     heap()->isolate()->PrintWithTimestamp(
     912           5 :         "[IncrementalMarking] Complete (normal).\n");
     913             :   }
     914       24109 :   request_type_ = COMPLETE_MARKING;
     915       24109 :   if (action == GC_VIA_STACK_GUARD) {
     916       11512 :     heap_->isolate()->stack_guard()->RequestGC();
     917             :   }
     918       24109 : }
     919             : 
     920             : 
     921       68846 : void IncrementalMarking::Epilogue() {
     922       68846 :   was_activated_ = false;
     923       68846 :   finalize_marking_completed_ = false;
     924       68846 : }
     925             : 
     926           0 : bool IncrementalMarking::ShouldDoEmbedderStep() {
     927     2363967 :   return state_ == MARKING && FLAG_incremental_marking_wrappers &&
     928     1005638 :          heap_->local_embedder_heap_tracer()->InUse();
     929             : }
     930             : 
     931     1646187 : void IncrementalMarking::FastForwardSchedule() {
     932     1646187 :   if (scheduled_bytes_to_mark_ < bytes_marked_) {
     933      194094 :     scheduled_bytes_to_mark_ = bytes_marked_;
     934      194094 :     if (FLAG_trace_incremental_marking) {
     935           5 :       heap_->isolate()->PrintWithTimestamp(
     936           5 :           "[IncrementalMarking] Fast-forwarded schedule\n");
     937             :     }
     938             :   }
     939     1646187 : }
     940             : 
     941           0 : void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
     942             :   // Consider marking close to finalization if 75% of the initial old
     943             :   // generation was marked.
     944     1018915 :   if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
     945      609282 :     FastForwardSchedule();
     946             :   }
     947           0 : }
     948             : 
     949     1018915 : void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
     950             :   // Time interval that should be sufficient to complete incremental marking.
     951             :   constexpr double kTargetMarkingWallTimeInMs = 500;
     952             :   constexpr double kMinTimeBetweenScheduleInMs = 10;
     953     1018915 :   if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
     954             :   double delta_ms =
     955        7995 :       Min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
     956        7995 :   schedule_update_time_ms_ = time_ms;
     957             : 
     958             :   size_t bytes_to_mark =
     959        7995 :       (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
     960             :   AddScheduledBytesToMark(bytes_to_mark);
     961             : 
     962        7995 :   if (FLAG_trace_incremental_marking) {
     963           0 :     heap_->isolate()->PrintWithTimestamp(
     964             :         "[IncrementalMarking] Scheduled %" PRIuS
     965             :         "KB to mark based on time delta %.1fms\n",
     966           0 :         bytes_to_mark / KB, delta_ms);
     967             :   }
     968             : }
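
The time-based schedule aims to mark the whole initial old generation within
kTargetMarkingWallTimeInMs (500 ms) of wall time, so each elapsed millisecond
schedules 1/500th of the initial size. For a hypothetical 100 MB initial old
generation, a 50 ms delta schedules (50 / 500) * 100 MB = 10 MB:

    #include <cstddef>

    std::size_t BytesToMark(double delta_ms, std::size_t initial_old_gen_size) {
      constexpr double kTargetMarkingWallTimeInMs = 500;
      return static_cast<std::size_t>(
          (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_gen_size);
    }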
     969             : 
     970             : namespace {
     971             : StepResult CombineStepResults(StepResult a, StepResult b) {
     972     2716658 :   if (a == StepResult::kMoreWorkRemaining ||
     973     1358329 :       b == StepResult::kMoreWorkRemaining)
     974             :     return StepResult::kMoreWorkRemaining;
     975     2032282 :   if (a == StepResult::kWaitingForFinalization ||
     976     1016141 :       b == StepResult::kWaitingForFinalization)
     977             :     return StepResult::kWaitingForFinalization;
     978             :   return StepResult::kNoImmediateWork;
     979             : }
     980             : }  // anonymous namespace
     981             : 
     982     1018915 : StepResult IncrementalMarking::AdvanceWithDeadline(
     983             :     double deadline_in_ms, CompletionAction completion_action,
     984             :     StepOrigin step_origin) {
     985             :   HistogramTimerScope incremental_marking_scope(
     986     1018915 :       heap_->isolate()->counters()->gc_incremental_marking());
     987     3056745 :   TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
     988     5094575 :   TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
     989             :   DCHECK(!IsStopped());
     990             : 
     991     1018915 :   ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
     992             :   FastForwardScheduleIfCloseToFinalization();
     993             : 
     994             :   double remaining_time_in_ms = 0.0;
     995             :   StepResult result;
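                      :   // Alternate V8 marking (half a step slice) with embedder tracing (up to
                      :   // a full slice) until the deadline passes or no more work remains.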
     996             :   do {
     997             :     StepResult v8_result =
     998     1358329 :         V8Step(kStepSizeInMs / 2, completion_action, step_origin);
     999             :     remaining_time_in_ms =
    1000     1358329 :         deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
    1001             :     StepResult embedder_result =
    1002     1358329 :         EmbedderStep(Min(kStepSizeInMs, remaining_time_in_ms));
    1003             :     result = CombineStepResults(v8_result, embedder_result);
    1004             :     remaining_time_in_ms =
    1005     1358329 :         deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
    1006     1358329 :   } while (remaining_time_in_ms >= kStepSizeInMs &&
    1007             :            result == StepResult::kMoreWorkRemaining);
    1008     1018915 :   return result;
    1009             : }
    1010             : 
    1011       99242 : void IncrementalMarking::FinalizeSweeping() {
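                      :   // Completes sweeping synchronously once the concurrent sweeper tasks are
                      :   // done (or were never started), then transitions to marking.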
    1012             :   DCHECK(state_ == SWEEPING);
    1013      388472 :   if (heap_->mark_compact_collector()->sweeping_in_progress() &&
    1014      181453 :       (!FLAG_concurrent_sweeping ||
    1015       90707 :        !heap_->mark_compact_collector()->sweeper()->AreSweeperTasksRunning())) {
    1016       10916 :     heap_->mark_compact_collector()->EnsureSweepingCompleted();
    1017             :   }
    1018      198484 :   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    1019             : #ifdef DEBUG
    1020             :     heap_->VerifyCountersAfterSweeping();
    1021             : #endif
    1022       13954 :     StartMarking();
    1023             :   }
    1024       99242 : }
    1025             : 
    1026           0 : size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
     1027             :   // Return the old generation bytes allocated since the last call.
    1028       71015 :   size_t current_counter = heap_->OldGenerationAllocationCounter();
    1029       71015 :   size_t result = current_counter - old_generation_allocation_counter_;
    1030       71015 :   old_generation_allocation_counter_ = current_counter;
    1031           0 :   return result;
    1032             : }
    1033             : 
    1034       71015 : size_t IncrementalMarking::StepSizeToMakeProgress() {
    1035             :   const size_t kTargetStepCount = 256;
    1036             :   const size_t kTargetStepCountAtOOM = 32;
    1037             :   const size_t kMaxStepSizeInByte = 256 * KB;
    1038       71015 :   size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
    1039             : 
    1040       71015 :   if (!heap()->CanExpandOldGeneration(oom_slack)) {
    1041         771 :     return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
    1042             :   }
    1043             : 
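                      :   // Normal path: aim to finish marking in ~256 steps. E.g. a 512MB initial
                      :   // old generation gives 2MB per step, clamped to the 256KB cap (reached by
                      :   // any heap larger than 64MB).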
    1044       70244 :   return Min(Max(initial_old_generation_size_ / kTargetStepCount,
    1045             :                  IncrementalMarking::kMinStepSizeInBytes),
    1046       70244 :              kMaxStepSizeInByte);
    1047             : }
    1048             : 
    1049           0 : void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
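                      :   // `a + b < a` is the standard unsigned wrap-around check, since size_t
                      :   // arithmetic is modular.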
    1050       79010 :   if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
    1051             :     // The overflow case.
    1052           0 :     scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
    1053             :   } else {
    1054       79010 :     scheduled_bytes_to_mark_ += bytes_to_mark;
    1055             :   }
    1056           0 : }
    1057             : 
    1058       71015 : void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
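                      :   // Schedules work from two sources: a fixed progress quota so marking
                      :   // advances even without allocation, plus the bytes allocated since the
                      :   // last step so marking keeps pace with the mutator.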
    1059       71015 :   size_t progress_bytes = StepSizeToMakeProgress();
    1060             :   size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
    1061       71015 :   size_t bytes_to_mark = progress_bytes + allocation_bytes;
    1062             :   AddScheduledBytesToMark(bytes_to_mark);
    1063             : 
    1064       71015 :   if (FLAG_trace_incremental_marking) {
    1065           0 :     heap_->isolate()->PrintWithTimestamp(
    1066             :         "[IncrementalMarking] Scheduled %" PRIuS
    1067             :         "KB to mark based on allocation (progress="
    1068             :         "%" PRIuS "KB, allocation=%" PRIuS "KB)\n",
    1069           0 :         bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
    1070             :   }
    1071       71015 : }
    1072             : 
    1073     1098081 : void IncrementalMarking::FetchBytesMarkedConcurrently() {
    1074     1098081 :   if (FLAG_concurrent_marking) {
    1075             :     size_t current_bytes_marked_concurrently =
    1076     1086599 :         heap()->concurrent_marking()->TotalMarkedBytes();
     1077             :     // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
     1078             :     // short period of time when a concurrent marking task is finishing.
    1079     1086599 :     if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
    1080             :       bytes_marked_ +=
    1081       38489 :           current_bytes_marked_concurrently - bytes_marked_concurrently_;
    1082       38489 :       bytes_marked_concurrently_ = current_bytes_marked_concurrently;
    1083             :     }
    1084     1086599 :     if (FLAG_trace_incremental_marking) {
    1085          13 :       heap_->isolate()->PrintWithTimestamp(
    1086             :           "[IncrementalMarking] Marked %" PRIuS "KB on background threads\n",
    1087          39 :           heap_->concurrent_marking()->TotalMarkedBytes() / KB);
    1088             :     }
    1089             :   }
    1090     1098081 : }
    1091             : 
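                      : // Returns how many bytes the marker should process to catch up with the
                      : // schedule, or zero when it is (close enough to) ahead of it.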
    1092     1098081 : size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
    1093     1098081 :   FetchBytesMarkedConcurrently();
    1094     1098081 :   if (FLAG_trace_incremental_marking) {
    1095          13 :     if (scheduled_bytes_to_mark_ > bytes_marked_) {
    1096           0 :       heap_->isolate()->PrintWithTimestamp(
    1097             :           "[IncrementalMarking] Marker is %" PRIuS "KB behind schedule\n",
    1098           0 :           (scheduled_bytes_to_mark_ - bytes_marked_) / KB);
    1099             :     } else {
    1100          13 :       heap_->isolate()->PrintWithTimestamp(
    1101             :           "[IncrementalMarking] Marker is %" PRIuS "KB ahead of schedule\n",
    1102          26 :           (bytes_marked_ - scheduled_bytes_to_mark_) / KB);
    1103             :     }
    1104             :   }
     1105             :     // Allow steps on allocation to get behind the schedule by a small amount.
    1106             :   // This gives higher priority to steps in tasks.
    1107     1098081 :   size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
    1108     1098081 :   if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
    1109             :     return 0;
    1110       39713 :   return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
    1111             : }
    1112             : 
    1113       71484 : void IncrementalMarking::AdvanceOnAllocation() {
    1114             :   // Code using an AlwaysAllocateScope assumes that the GC state does not
     1115             :   // change; that implies that marking steps must not be performed.
    1116      285936 :   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
    1117      214023 :       (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
    1118         469 :     return;
    1119             :   }
    1120             :   HistogramTimerScope incremental_marking_scope(
    1121       71015 :       heap_->isolate()->counters()->gc_incremental_marking());
    1122      213048 :   TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
    1123      355078 :   TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
    1124       71015 :   ScheduleBytesToMarkBasedOnAllocation();
    1125       71015 :   V8Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
    1126             : }
    1127             : 
    1128     1440515 : StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
    1129             :                                       CompletionAction action,
    1130             :                                       StepOrigin step_origin) {
    1131             :   StepResult result = StepResult::kMoreWorkRemaining;
    1132     1440515 :   double start = heap_->MonotonicallyIncreasingTimeInMs();
    1133             : 
    1134     1440516 :   if (state_ == SWEEPING) {
    1135      496205 :     TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    1136       99241 :     FinalizeSweeping();
    1137             :   }
    1138             : 
    1139             :   size_t bytes_processed = 0, bytes_to_process = 0;
    1140     1440516 :   if (state_ == MARKING) {
    1141     1098080 :     if (FLAG_concurrent_marking) {
    1142     1086598 :       heap_->new_space()->ResetOriginalTop();
    1143     1086598 :       heap_->new_lo_space()->ResetPendingObject();
     1144             :       // It is safe to merge all on-hold objects back into the shared work
     1145             :       // list at this step because we are at a safepoint where all objects
     1146             :       // are properly initialized.
    1147             :       marking_worklist()->shared()->MergeGlobalPool(
    1148     1086598 :           marking_worklist()->on_hold());
    1149             :     }
    1150             : 
    1151             : // Only print marking worklist in debug mode to save ~40KB of code size.
    1152             : #ifdef DEBUG
    1153             :     if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
    1154             :         FLAG_trace_gc_verbose) {
    1155             :       marking_worklist()->Print();
    1156             :     }
    1157             : #endif
    1158     1098081 :     if (FLAG_trace_incremental_marking) {
    1159          13 :       heap_->isolate()->PrintWithTimestamp(
    1160             :           "[IncrementalMarking] Marking speed %.fKB/ms\n",
    1161          13 :           heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    1162             :     }
    1163             :     // The first step after Scavenge will see many allocated bytes.
    1164             :     // Cap the step size to distribute the marking work more uniformly.
    1165     1098081 :     size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
    1166             :         max_step_size_in_ms,
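                      :     // Even when ahead of schedule (bytes_to_process == 0), process at
                      :     // least kMinStepSizeInBytes so every step makes forward progress.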
    1167     1098081 :         heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    1168     1098081 :     bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
    1169     1098080 :     if (bytes_to_process == 0) {
    1170             :       result = StepResult::kNoImmediateWork;
    1171             :     }
    1172             : 
    1173             :     bytes_processed =
    1174     2196064 :         ProcessMarkingWorklist(Max(bytes_to_process, kMinStepSizeInBytes));
    1175             : 
    1176     1097984 :     bytes_marked_ += bytes_processed;
    1177             : 
    1178     1097984 :     if (marking_worklist()->IsEmpty()) {
    1179             :       result = StepResult::kNoImmediateWork;
    1180     2122068 :       if (heap_->local_embedder_heap_tracer()
    1181             :               ->ShouldFinalizeIncrementalMarking()) {
    1182     1061014 :         if (!finalize_marking_completed_) {
    1183     1036905 :           FinalizeMarking(action);
    1184     1036905 :           FastForwardSchedule();
    1185             :           result = StepResult::kWaitingForFinalization;
    1186     2073810 :           incremental_marking_job()->Start(heap_);
    1187             :         } else {
    1188       24109 :           MarkingComplete(action);
    1189             :           result = StepResult::kWaitingForFinalization;
    1190             :         }
    1191             :       } else {
    1192          20 :         heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
    1193             :       }
    1194             :     }
    1195             :   }
    1196     1440516 :   if (FLAG_concurrent_marking) {
    1197     1429034 :     marking_worklist()->ShareWorkIfGlobalPoolIsEmpty();
    1198     2858068 :     heap_->concurrent_marking()->RescheduleTasksIfNeeded();
    1199             :   }
    1200             : 
    1201     1440516 :   double end = heap_->MonotonicallyIncreasingTimeInMs();
    1202     1440516 :   double duration = (end - start);
    1203             :   // Note that we report zero bytes here when sweeping was in progress or
    1204             :   // when we just started incremental marking. In these cases we did not
    1205             :   // process the marking deque.
    1206     2881032 :   heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
    1207     1440516 :   if (FLAG_trace_incremental_marking) {
    1208          13 :     heap_->isolate()->PrintWithTimestamp(
     1209             :         "[IncrementalMarking] Step %s %" PRIuS "KB (%" PRIuS "KB) in %.1fms\n",
    1210             :         step_origin == StepOrigin::kV8 ? "in v8" : "in task",
    1211          13 :         bytes_processed / KB, bytes_to_process / KB, duration);
    1212             :   }
    1213     1440516 :   return result;
    1214             : }
    1215             : 
    1216             : }  // namespace internal
    1217      122036 : }  // namespace v8

Generated by: LCOV version 1.10