LCOV - code coverage report
Current view: top level - src/heap - concurrent-marking.cc
Test: app.info
Date: 2019-03-21

                  Hit    Total    Coverage
Lines:            339      367      92.4 %
Functions:         93      108      86.1 %

          Line data    Source code
       1             : // Copyright 2017 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/concurrent-marking.h"
       6             : 
       7             : #include <stack>
       8             : #include <unordered_map>
       9             : 
      10             : #include "include/v8config.h"
      11             : #include "src/base/template-utils.h"
      12             : #include "src/heap/gc-tracer.h"
      13             : #include "src/heap/heap-inl.h"
      14             : #include "src/heap/heap.h"
      15             : #include "src/heap/mark-compact-inl.h"
      16             : #include "src/heap/mark-compact.h"
      17             : #include "src/heap/marking.h"
      18             : #include "src/heap/objects-visiting-inl.h"
      19             : #include "src/heap/objects-visiting.h"
      20             : #include "src/heap/worklist.h"
      21             : #include "src/isolate.h"
      22             : #include "src/objects/data-handler-inl.h"
      23             : #include "src/objects/embedder-data-array-inl.h"
      24             : #include "src/objects/hash-table-inl.h"
      25             : #include "src/objects/slots-inl.h"
      26             : #include "src/transitions-inl.h"
      27             : #include "src/utils-inl.h"
      28             : #include "src/utils.h"
      29             : #include "src/v8.h"
      30             : 
      31             : namespace v8 {
      32             : namespace internal {
      33             : 
      34             : class ConcurrentMarkingState final
      35             :     : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
      36             :  public:
      37             :   explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
      38      447527 :       : memory_chunk_data_(memory_chunk_data) {}
      39             : 
      40             :   ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
      41             :     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
      42             :                   reinterpret_cast<intptr_t>(chunk),
      43             :               MemoryChunk::kMarkBitmapOffset);
      44             :     return chunk->marking_bitmap<AccessMode::ATOMIC>();
      45             :   }
      46             : 
      47   430747312 :   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
      48   856525251 :     (*memory_chunk_data_)[chunk].live_bytes += by;
      49   425777939 :   }
      50             : 
      51             :   // The live_bytes and SetLiveBytes methods of the marking state are
      52             :   // not used by the concurrent marker.
      53             : 
      54             :  private:
      55             :   MemoryChunkDataMap* memory_chunk_data_;
      56             : };
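
ConcurrentMarkingState buffers live-byte counts in a task-local map instead of updating the page counters directly; the buffered totals are merged back on the main thread in FlushMemoryChunkData() further down. A minimal sketch of that buffering pattern, with an illustrative Chunk type standing in for MemoryChunk (the names here are simplified, not V8's):

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    struct Chunk {
      intptr_t live_bytes = 0;  // Authoritative counter, owned by the main thread.
    };

    // Task-local buffer: no atomics needed because only one worker writes to it.
    using ChunkDataMap = std::unordered_map<Chunk*, intptr_t>;

    void IncrementLiveBytes(ChunkDataMap* buffer, Chunk* chunk, intptr_t by) {
      (*buffer)[chunk] += by;  // Cheap thread-local accumulation.
    }

    // Main-thread merge, run after the workers have finished.
    void Flush(ChunkDataMap* buffer) {
      for (auto& pair : *buffer) pair.first->live_bytes += pair.second;
      buffer->clear();
    }

    int main() {
      Chunk chunk;
      ChunkDataMap buffer;
      IncrementLiveBytes(&buffer, &chunk, 64);
      IncrementLiveBytes(&buffer, &chunk, 128);
      Flush(&buffer);
      std::cout << chunk.live_bytes << "\n";  // 192
    }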
      57             : 
      58             : // Helper class for storing in-object slot addresses and values.
      59             : class SlotSnapshot {
      60             :  public:
      61   112169912 :   SlotSnapshot() : number_of_slots_(0) {}
      62             :   int number_of_slots() const { return number_of_slots_; }
      63   336683122 :   ObjectSlot slot(int i) const { return snapshot_[i].first; }
      64   336683122 :   Object value(int i) const { return snapshot_[i].second; }
      65    54729959 :   void clear() { number_of_slots_ = 0; }
      66             :   void add(ObjectSlot slot, Object value) {
      67   332799671 :     snapshot_[number_of_slots_++] = {slot, value};
      68             :   }
      69             : 
      70             :  private:
      71             :   static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
      72             :   int number_of_slots_;
      73             :   std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
      74             :   DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
      75             : };
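
The snapshot is filled once and then visited without re-reading the object, so a mutator racing with the marker (for example, a string being migrated to a different representation) cannot change the slots mid-visit. A stripped-down sketch of the copy-then-visit pattern, with placeholder Slot/Value types in place of V8's ObjectSlot/Object:

    #include <cstddef>
    #include <iostream>
    #include <utility>

    using Slot = std::size_t;  // Placeholder for ObjectSlot.
    using Value = long;        // Placeholder for Object.

    class SlotSnapshot {
     public:
      int number_of_slots() const { return number_of_slots_; }
      Slot slot(int i) const { return snapshot_[i].first; }
      Value value(int i) const { return snapshot_[i].second; }
      void clear() { number_of_slots_ = 0; }
      void add(Slot slot, Value value) {
        snapshot_[number_of_slots_++] = {slot, value};
      }

     private:
      static const int kMaxSnapshotSize = 16;
      int number_of_slots_ = 0;
      std::pair<Slot, Value> snapshot_[kMaxSnapshotSize];
    };

    int main() {
      // Phase 1: copy the slots (in V8 this uses relaxed atomic loads).
      SlotSnapshot snapshot;
      Value object[3] = {10, 20, 30};
      for (Slot s = 0; s < 3; ++s) snapshot.add(s, object[s]);
      // Phase 2: visit the copy; the original may change underneath.
      for (int i = 0; i < snapshot.number_of_slots(); ++i)
        std::cout << snapshot.slot(i) << " -> " << snapshot.value(i) << "\n";
    }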
      76             : 
      77      894956 : class ConcurrentMarkingVisitor final
      78             :     : public HeapVisitor<int, ConcurrentMarkingVisitor> {
      79             :  public:
      80             :   using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
      81             : 
      82             :   explicit ConcurrentMarkingVisitor(
      83             :       ConcurrentMarking::MarkingWorklist* shared,
      84             :       MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
      85             :       ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
      86             :       bool embedder_tracing_enabled, unsigned mark_compact_epoch,
      87             :       bool is_forced_gc)
      88             :       : shared_(shared, task_id),
      89             :         weak_objects_(weak_objects),
      90             :         embedder_objects_(embedder_objects, task_id),
      91             :         marking_state_(memory_chunk_data),
      92             :         memory_chunk_data_(memory_chunk_data),
      93             :         task_id_(task_id),
      94             :         embedder_tracing_enabled_(embedder_tracing_enabled),
      95             :         mark_compact_epoch_(mark_compact_epoch),
      96     1790108 :         is_forced_gc_(is_forced_gc) {}
      97             : 
      98             :   template <typename T>
      99             :   static V8_INLINE T Cast(HeapObject object) {
     100             :     return T::cast(object);
     101             :   }
     102             : 
     103   408412787 :   bool ShouldVisit(HeapObject object) {
     104   822880043 :     return marking_state_.GreyToBlack(object);
     105             :   }
     106             : 
     107             :   bool AllowDefaultJSObjectVisit() { return false; }
     108             : 
     109             :   template <typename THeapObjectSlot>
     110  2478977109 :   void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
     111             :                                HeapObject heap_object) {
     112  2478977109 :     MarkObject(heap_object);
     113             :     MarkCompactCollector::RecordSlot(host, slot, heap_object);
     114  2486700156 :   }
     115             : 
     116             :   template <typename THeapObjectSlot>
     117    75634233 :   void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
     118             :                              HeapObject heap_object) {
     119             : #ifdef THREAD_SANITIZER
     120             :     // Perform a dummy acquire load to tell TSAN that there is no data race
     121             :     // in mark-bit initialization. See MemoryChunk::Initialize for the
     122             :     // corresponding release store.
     123             :     MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
     124             :     CHECK_NOT_NULL(chunk->synchronized_heap());
     125             : #endif
     126    75634233 :     if (marking_state_.IsBlackOrGrey(heap_object)) {
     127             :       // Weak references with live values are directly processed here to
     128             :       // reduce the processing time of weak cells during the main GC
     129             :       // pause.
     130             :       MarkCompactCollector::RecordSlot(host, slot, heap_object);
     131             :     } else {
     132             :       // If we do not know about liveness of the value, we have to process
     133             :       // the reference when we know the liveness of the whole transitive
     134             :       // closure.
     135    19626596 :       weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
     136             :     }
     137    75656079 :   }
     138             : 
     139   678842760 :   void VisitPointers(HeapObject host, ObjectSlot start,
     140             :                      ObjectSlot end) override {
     141             :     VisitPointersImpl(host, start, end);
     142   673480297 :   }
     143             : 
     144    50203919 :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
     145             :                      MaybeObjectSlot end) override {
     146             :     VisitPointersImpl(host, start, end);
     147    49812002 :   }
     148             : 
     149             :   template <typename TSlot>
     150             :   V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
     151             :     using THeapObjectSlot = typename TSlot::THeapObjectSlot;
     152  1100970164 :     for (TSlot slot = start; slot < end; ++slot) {
     153  3149609686 :       typename TSlot::TObject object = slot.Relaxed_Load();
     154  3152382319 :       HeapObject heap_object;
     155  3152382319 :       if (object.GetHeapObjectIfStrong(&heap_object)) {
     156             :         // If the reference changes concurrently from strong to weak, the write
     157             :         // barrier will treat the weak reference as strong, so we won't miss the
     158             :         // weak reference.
     159  2508245647 :         ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
     160   245243348 :       } else if (TSlot::kCanBeWeak &&
     161             :                  object.GetHeapObjectIfWeak(&heap_object)) {
     162    75206837 :         ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
     163             :       }
     164             :     }
     165             :   }
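
Strong and weak references share the slot encoding, and the branches above dispatch on the tag bits; Smis and cleared weak slots match neither branch and are skipped. A toy illustration of tag-based dispatch using an invented two-bit scheme, not V8's actual pointer tagging:

    #include <cstdint>
    #include <iostream>

    // Invented tagging: low bit 1 = heap object, bit 2 = weak. Not V8's layout.
    constexpr uintptr_t kHeapObjectTag = 1;
    constexpr uintptr_t kWeakTag = 2;

    bool GetHeapObjectIfStrong(uintptr_t raw, uintptr_t* out) {
      if ((raw & (kHeapObjectTag | kWeakTag)) == kHeapObjectTag) {
        *out = raw & ~(kHeapObjectTag | kWeakTag);
        return true;
      }
      return false;
    }

    bool GetHeapObjectIfWeak(uintptr_t raw, uintptr_t* out) {
      if ((raw & kWeakTag) != 0) {
        *out = raw & ~(kHeapObjectTag | kWeakTag);
        return true;
      }
      return false;
    }

    int main() {
      uintptr_t strong_ref = 0x1000 | kHeapObjectTag;
      uintptr_t weak_ref = 0x2000 | kHeapObjectTag | kWeakTag;
      uintptr_t object;
      if (GetHeapObjectIfStrong(strong_ref, &object))
        std::cout << "strong 0x" << std::hex << object << "\n";
      if (GetHeapObjectIfWeak(weak_ref, &object))
        std::cout << "weak 0x" << std::hex << object << "\n";
    }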
     166             : 
     167             :   // Weak list pointers should be ignored during marking. The lists are
     168             :   // reconstructed after GC.
     169    41654890 :   void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
     170    41654890 :                                ObjectSlot end) final {}
     171             : 
     172     4530983 :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
     173             :     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     174             :     HeapObject object = rinfo->target_object();
     175     4530983 :     RecordRelocSlot(host, rinfo, object);
     176     4527958 :     if (!marking_state_.IsBlackOrGrey(object)) {
     177      288952 :       if (host->IsWeakObject(object)) {
     178       34750 :         weak_objects_->weak_objects_in_code.Push(task_id_,
     179       34750 :                                                  std::make_pair(object, host));
     180             :       } else {
     181      254172 :         MarkObject(object);
     182             :       }
     183             :     }
     184     4527962 :   }
     185             : 
     186      487444 :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
     187             :     DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
     188      487444 :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     189      487390 :     RecordRelocSlot(host, rinfo, target);
     190      487361 :     MarkObject(target);
     191      487318 :   }
     192             : 
     193    54460503 :   void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
     194   728034517 :     for (int i = 0; i < snapshot.number_of_slots(); i++) {
     195             :       ObjectSlot slot = snapshot.slot(i);
     196             :       Object object = snapshot.value(i);
     197             :       DCHECK(!HasWeakHeapObjectTag(object));
     198   336683122 :       if (!object->IsHeapObject()) continue;
     199             :       HeapObject heap_object = HeapObject::cast(object);
     200   331634817 :       MarkObject(heap_object);
     201             :       MarkCompactCollector::RecordSlot(host, slot, heap_object);
     202             :     }
     203    54564388 :   }
     204             : 
     205             :   // ===========================================================================
     206             :   // JS object =================================================================
     207             :   // ===========================================================================
     208             : 
     209        3813 :   int VisitJSObject(Map map, JSObject object) {
     210        3813 :     return VisitJSObjectSubclass(map, object);
     211             :   }
     212             : 
     213    10645892 :   int VisitJSObjectFast(Map map, JSObject object) {
     214    10649633 :     return VisitJSObjectSubclassFast(map, object);
     215             :   }
     216             : 
     217       22682 :   int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
     218       22682 :     return VisitJSObjectSubclass(map, object);
     219             :   }
     220             : 
     221          46 :   int VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
     222          46 :     int size = VisitJSObjectSubclass(map, weak_ref);
     223          46 :     if (size == 0) {
     224             :       return 0;
     225             :     }
     226          46 :     if (weak_ref->target()->IsHeapObject()) {
     227             :       HeapObject target = HeapObject::cast(weak_ref->target());
     228          46 :       if (marking_state_.IsBlackOrGrey(target)) {
     229             :         // Record the slot inside the JSWeakRef, since the
     230             :         // VisitJSObjectSubclass above didn't visit it.
     231             :         ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
     232             :         MarkCompactCollector::RecordSlot(weak_ref, slot, target);
     233             :       } else {
      234             :         // JSWeakRef points to a potentially dead object. We have to process
      235             :         // it when we know the liveness of the whole transitive closure.
     236          30 :         weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
     237             :       }
     238             :     }
     239             :     return size;
     240             :   }
     241             : 
     242          15 :   int VisitWeakCell(Map map, WeakCell weak_cell) {
     243          15 :     if (!ShouldVisit(weak_cell)) return 0;
     244             : 
     245             :     int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
     246             :     VisitMapPointer(weak_cell, weak_cell->map_slot());
     247          15 :     WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
     248          15 :     if (weak_cell->target()->IsHeapObject()) {
     249             :       HeapObject target = HeapObject::cast(weak_cell->target());
     250          15 :       if (marking_state_.IsBlackOrGrey(target)) {
     251             :         // Record the slot inside the WeakCell, since the IterateBody above
     252             :         // didn't visit it.
     253             :         ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
     254             :         MarkCompactCollector::RecordSlot(weak_cell, slot, target);
     255             :       } else {
      256             :         // WeakCell points to a potentially dead object. We have to process
      257             :         // it when we know the liveness of the whole transitive closure.
     258           9 :         weak_objects_->weak_cells.Push(task_id_, weak_cell);
     259             :       }
     260             :     }
     261             :     return size;
     262             :   }
     263             : 
     264             :   // Some JS objects can carry back links to embedders that contain information
      265             :   // relevant to the garbage collector.
     266             : 
     267       72700 :   int VisitJSApiObject(Map map, JSObject object) {
     268       72700 :     return VisitEmbedderTracingSubclass(map, object);
     269             :   }
     270             : 
     271      232521 :   int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
     272      232521 :     return VisitEmbedderTracingSubclass(map, object);
     273             :   }
     274             : 
     275          46 :   int VisitJSDataView(Map map, JSDataView object) {
     276          46 :     return VisitEmbedderTracingSubclass(map, object);
     277             :   }
     278             : 
     279       51320 :   int VisitJSTypedArray(Map map, JSTypedArray object) {
     280       51320 :     return VisitEmbedderTracingSubclass(map, object);
     281             :   }
     282             : 
     283             :   // ===========================================================================
     284             :   // Strings with pointers =====================================================
     285             :   // ===========================================================================
     286             : 
     287     7539486 :   int VisitConsString(Map map, ConsString object) {
     288     7546167 :     return VisitFullyWithSnapshot(map, object);
     289             :   }
     290             : 
     291       41859 :   int VisitSlicedString(Map map, SlicedString object) {
     292       41885 :     return VisitFullyWithSnapshot(map, object);
     293             :   }
     294             : 
     295       42621 :   int VisitThinString(Map map, ThinString object) {
     296       42664 :     return VisitFullyWithSnapshot(map, object);
     297             :   }
     298             : 
     299             :   // ===========================================================================
     300             :   // Strings without pointers ==================================================
     301             :   // ===========================================================================
     302             : 
     303    39762767 :   int VisitSeqOneByteString(Map map, SeqOneByteString object) {
     304    39762767 :     if (!ShouldVisit(object)) return 0;
     305             :     VisitMapPointer(object, object->map_slot());
     306             :     return SeqOneByteString::SizeFor(object->synchronized_length());
     307             :   }
     308             : 
     309    21233409 :   int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
     310    21233409 :     if (!ShouldVisit(object)) return 0;
     311             :     VisitMapPointer(object, object->map_slot());
     312             :     return SeqTwoByteString::SizeFor(object->synchronized_length());
     313             :   }
     314             : 
     315             :   // ===========================================================================
     316             :   // Fixed array object ========================================================
     317             :   // ===========================================================================
     318             : 
     319       44606 :   int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
     320             :                                      MemoryChunk* chunk) {
     321             :     // The concurrent marker can process larger chunks than the main thread
     322             :     // marker.
     323             :     const int kProgressBarScanningChunk =
     324             :         RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
     325             :     DCHECK(marking_state_.IsBlackOrGrey(object));
     326       44606 :     marking_state_.GreyToBlack(object);
     327             :     int size = FixedArray::BodyDescriptor::SizeOf(map, object);
     328             :     size_t current_progress_bar = chunk->ProgressBar();
     329       44602 :     if (current_progress_bar == 0) {
     330             :       // Try to move the progress bar forward to start offset. This solves the
     331             :       // problem of not being able to observe a progress bar reset when
     332             :       // processing the first kProgressBarScanningChunk.
     333        7188 :       if (!chunk->TrySetProgressBar(0,
     334             :                                     FixedArray::BodyDescriptor::kStartOffset))
     335             :         return 0;
     336             :       current_progress_bar = FixedArray::BodyDescriptor::kStartOffset;
     337             :     }
     338       44604 :     int start = static_cast<int>(current_progress_bar);
     339       44604 :     int end = Min(size, start + kProgressBarScanningChunk);
     340       44604 :     if (start < end) {
     341       44560 :       VisitPointers(object, object.RawField(start), object.RawField(end));
     342             :       // Setting the progress bar can fail if the object that is currently
     343             :       // scanned is also revisited. In this case, there may be two tasks racing
      344             :       // on the progress counter. The loser can bail out because the progress
     345             :       // bar is reset before the tasks race on the object.
     346       44563 :       if (chunk->TrySetProgressBar(current_progress_bar, end) && (end < size)) {
      347             :         // The object can be pushed back onto the marking worklist only after
      348             :         // the progress bar has been updated.
     349             :         shared_.Push(object);
     350             :       }
     351             :     }
     352       44606 :     return end - start;
     353             :   }
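
TrySetProgressBar only advances the bar from the value the task previously observed, so two tasks revisiting the same array cannot both claim the same chunk. A minimal sketch of that compare-and-swap protocol with std::atomic (simplified; it omits the reset-to-zero handling above):

    #include <algorithm>
    #include <atomic>
    #include <iostream>

    std::atomic<size_t> progress_bar{0};

    // Advance the bar from old_value to new_value; fails if another task won.
    bool TrySetProgressBar(size_t old_value, size_t new_value) {
      return progress_bar.compare_exchange_strong(old_value, new_value);
    }

    int main() {
      const size_t size = 100, chunk = 32;
      size_t scanned = 0;
      for (;;) {
        size_t start = progress_bar.load();
        size_t end = std::min(size, start + chunk);
        if (start >= end) break;
        scanned += end - start;                     // "Scan" slots [start, end).
        if (!TrySetProgressBar(start, end)) break;  // Lost the race: bail out.
        if (end == size) break;  // Otherwise re-push the object and continue.
      }
      std::cout << "scanned " << scanned << " of " << size << "\n";
    }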
     354             : 
     355     8341437 :   int VisitFixedArray(Map map, FixedArray object) {
     356             :     // Arrays with the progress bar are not left-trimmable because they reside
     357             :     // in the large object space.
     358             :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
     359             :     return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
     360             :                ? VisitFixedArrayWithProgressBar(map, object, chunk)
     361     8341437 :                : VisitLeftTrimmableArray(map, object);
     362             :   }
     363             : 
     364             :   int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
     365       79681 :     return VisitLeftTrimmableArray(map, object);
     366             :   }
     367             : 
     368             :   // ===========================================================================
     369             :   // Side-effectful visitation.
     370             :   // ===========================================================================
     371             : 
     372    41668105 :   int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
     373    41668105 :     if (!ShouldVisit(shared_info)) return 0;
     374             : 
     375             :     int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
     376             :     VisitMapPointer(shared_info, shared_info->map_slot());
     377             :     SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
     378    41720597 :                                                     this);
     379             : 
     380             :     // If the SharedFunctionInfo has old bytecode, mark it as flushable,
     381             :     // otherwise visit the function data field strongly.
     382    41068252 :     if (shared_info->ShouldFlushBytecode()) {
     383      239671 :       weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
     384             :     } else {
     385             :       VisitPointer(shared_info, shared_info->RawField(
     386    40968557 :                                     SharedFunctionInfo::kFunctionDataOffset));
     387             :     }
     388             :     return size;
     389             :   }
     390             : 
     391     1360660 :   int VisitBytecodeArray(Map map, BytecodeArray object) {
     392     1360660 :     if (!ShouldVisit(object)) return 0;
     393             :     int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
     394             :     VisitMapPointer(object, object->map_slot());
     395     1359631 :     BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
     396     1364620 :     if (!is_forced_gc_) {
     397     1102791 :       object->MakeOlder();
     398             :     }
     399             :     return size;
     400             :   }
     401             : 
     402    36069362 :   int VisitJSFunction(Map map, JSFunction object) {
     403    36069362 :     int size = VisitJSObjectSubclass(map, object);
     404             : 
     405             :     // Check if the JSFunction needs reset due to bytecode being flushed.
     406    36098129 :     if (object->NeedsResetDueToFlushedBytecode()) {
     407        6227 :       weak_objects_->flushed_js_functions.Push(task_id_, object);
     408             :     }
     409             : 
     410    35894766 :     return size;
     411             :   }
     412             : 
     413    26464987 :   int VisitMap(Map meta_map, Map map) {
     414    26464987 :     if (!ShouldVisit(map)) return 0;
     415             :     int size = Map::BodyDescriptor::SizeOf(meta_map, map);
     416    26977395 :     if (map->CanTransition()) {
     417             :       // Maps that can transition share their descriptor arrays and require
     418             :       // special visiting logic to avoid memory leaks.
     419             :       // Since descriptor arrays are potentially shared, ensure that only the
     420             :       // descriptors that belong to this map are marked. The first time a
     421             :       // non-empty descriptor array is marked, its header is also visited. The
     422             :       // slot holding the descriptor array will be implicitly recorded when the
     423             :       // pointer fields of this map are visited.
     424             :       DescriptorArray descriptors = map->synchronized_instance_descriptors();
     425    26771224 :       MarkDescriptorArrayBlack(descriptors);
     426    26606178 :       int number_of_own_descriptors = map->NumberOfOwnDescriptors();
     427    26606178 :       if (number_of_own_descriptors) {
     428             :         // It is possible that the concurrent marker observes the
     429             :         // number_of_own_descriptors out of sync with the descriptors. In that
     430             :         // case the marking write barrier for the descriptor array will ensure
     431             :         // that all required descriptors are marked. The concurrent marker
      432             :         // should just avoid crashing in that case. That's why we need the
     433             :         // std::min<int>() below.
     434    20326078 :         VisitDescriptors(descriptors,
     435             :                          std::min<int>(number_of_own_descriptors,
     436    40652156 :                                        descriptors->number_of_descriptors()));
     437             :       }
     438             :       // Mark the pointer fields of the Map. Since the transitions array has
     439             :       // been marked already, it is fine that one of these fields contains a
     440             :       // pointer to it.
     441             :     }
     442    26780522 :     Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
     443    26207364 :     return size;
     444             :   }
     445             : 
     446    20424209 :   void VisitDescriptors(DescriptorArray descriptor_array,
     447             :                         int number_of_own_descriptors) {
     448    20424209 :     int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
     449    20424209 :     int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
     450    20424209 :         mark_compact_epoch_, new_marked);
     451    20521048 :     if (old_marked < new_marked) {
     452    12458947 :       VisitPointers(
     453             :           descriptor_array,
     454    12516172 :           MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
     455    12516172 :           MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
     456             :     }
     457    20463823 :   }
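
UpdateNumberOfMarkedDescriptors is safe across GC cycles because the marked count is tagged with the mark-compact epoch: a count left over from an earlier epoch reads as zero, and racing tasks resolve via compare-and-swap, so each descriptor range is visited at most once per cycle. A sketch of such an epoch-tagged counter packed into one atomic word (the field widths here are illustrative, not V8's layout):

    #include <atomic>
    #include <cstdint>
    #include <iostream>

    // Pack a GC epoch (high 16 bits) and a marked count (low 16 bits).
    std::atomic<uint32_t> raw_marked{0};

    // Returns the number of descriptors already marked in this epoch and
    // raises the count to new_marked; a CAS loop resolves races between tasks.
    int16_t UpdateNumberOfMarkedDescriptors(uint16_t epoch, int16_t new_marked) {
      uint32_t old_raw = raw_marked.load();
      for (;;) {
        uint16_t old_epoch = old_raw >> 16;
        int16_t old_marked =
            old_epoch == epoch ? static_cast<int16_t>(old_raw & 0xFFFF) : 0;
        if (old_marked >= new_marked) return old_marked;
        uint32_t new_raw =
            (uint32_t{epoch} << 16) | static_cast<uint16_t>(new_marked);
        if (raw_marked.compare_exchange_weak(old_raw, new_raw)) return old_marked;
        // old_raw was refreshed by the failed CAS; retry.
      }
    }

    int main() {
      std::cout << UpdateNumberOfMarkedDescriptors(7, 3) << "\n";  // 0: visit [0,3)
      std::cout << UpdateNumberOfMarkedDescriptors(7, 5) << "\n";  // 3: visit [3,5)
      std::cout << UpdateNumberOfMarkedDescriptors(8, 2) << "\n";  // 0: new epoch
    }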
     458             : 
     459      152080 :   int VisitDescriptorArray(Map map, DescriptorArray array) {
     460      152080 :     if (!ShouldVisit(array)) return 0;
     461             :     VisitMapPointer(array, array->map_slot());
     462             :     int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
     463             :     VisitPointers(array, array->GetFirstPointerSlot(),
     464       83780 :                   array->GetDescriptorSlot(0));
     465       83753 :     VisitDescriptors(array, array->number_of_descriptors());
     466       83748 :     return size;
     467             :   }
     468             : 
     469      457318 :   int VisitTransitionArray(Map map, TransitionArray array) {
     470      457318 :     if (!ShouldVisit(array)) return 0;
     471             :     VisitMapPointer(array, array->map_slot());
     472             :     int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
     473             :     TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
     474      457285 :     weak_objects_->transition_arrays.Push(task_id_, array);
     475      457300 :     return size;
     476             :   }
     477             : 
     478       30428 :   int VisitJSWeakCollection(Map map, JSWeakCollection object) {
     479       30428 :     return VisitJSObjectSubclass(map, object);
     480             :   }
     481             : 
     482       28173 :   int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
     483       28173 :     if (!ShouldVisit(table)) return 0;
     484       28434 :     weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
     485             : 
     486      243567 :     for (int i = 0; i < table->Capacity(); i++) {
     487             :       ObjectSlot key_slot =
     488             :           table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
     489      107997 :       HeapObject key = HeapObject::cast(table->KeyAt(i));
     490             :       MarkCompactCollector::RecordSlot(table, key_slot, key);
     491             : 
     492             :       ObjectSlot value_slot =
     493             :           table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
     494             : 
     495      107655 :       if (marking_state_.IsBlackOrGrey(key)) {
     496      107521 :         VisitPointer(table, value_slot);
     497             : 
     498             :       } else {
     499         134 :         Object value_obj = table->ValueAt(i);
     500             : 
     501         134 :         if (value_obj->IsHeapObject()) {
     502             :           HeapObject value = HeapObject::cast(value_obj);
     503             :           MarkCompactCollector::RecordSlot(table, value_slot, value);
     504             : 
     505             :           // Revisit ephemerons with both key and value unreachable at end
     506             :           // of concurrent marking cycle.
     507           9 :           if (marking_state_.IsWhite(value)) {
     508           5 :             weak_objects_->discovered_ephemerons.Push(task_id_,
     509          10 :                                                       Ephemeron{key, value});
     510             :           }
     511             :         }
     512             :       }
     513             :     }
     514             : 
     515       28015 :     return table->SizeFromMap(map);
     516             :   }
     517             : 
     518             :   // Implements ephemeron semantics: Marks value if key is already reachable.
     519             :   // Returns true if value was actually marked.
     520         106 :   bool ProcessEphemeron(HeapObject key, HeapObject value) {
     521         106 :     if (marking_state_.IsBlackOrGrey(key)) {
     522          50 :       if (marking_state_.WhiteToGrey(value)) {
     523             :         shared_.Push(value);
     524          47 :         return true;
     525             :       }
     526             : 
     527          56 :     } else if (marking_state_.IsWhite(value)) {
     528          56 :       weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
     529             :     }
     530             : 
     531             :     return false;
     532             :   }
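
Ephemerons that cannot be resolved yet are parked on next_ephemerons and retried as more of the heap is marked; overall the collector iterates this to a fixed point. A compact sketch of that fixed-point loop, with plain sets standing in for mark bits:

    #include <iostream>
    #include <set>
    #include <utility>
    #include <vector>

    using Object = int;
    using Ephemeron = std::pair<Object, Object>;  // {key, value}

    // Marks values of ephemerons whose keys are live, repeating until no
    // ephemeron makes progress (the transitive-closure fixed point).
    void ProcessEphemerons(std::set<Object>* marked,
                           std::vector<Ephemeron> pending) {
      bool progress = true;
      while (progress) {
        progress = false;
        std::vector<Ephemeron> next;
        for (const Ephemeron& e : pending) {
          if (marked->count(e.first)) {
            progress |= marked->insert(e.second).second;  // Key live: mark value.
          } else {
            next.push_back(e);  // Key dead so far: retry next round.
          }
        }
        pending.swap(next);
      }
    }

    int main() {
      std::set<Object> marked = {1};
      // 1 -> 2 and 2 -> 3 resolve only after two rounds; 4 -> 5 never does.
      ProcessEphemerons(&marked, {{2, 3}, {1, 2}, {4, 5}});
      for (Object o : marked) std::cout << o << " ";  // 1 2 3
      std::cout << "\n";
    }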
     533             : 
     534  2751495450 :   void MarkObject(HeapObject object) {
     535             : #ifdef THREAD_SANITIZER
     536             :     // Perform a dummy acquire load to tell TSAN that there is no data race
     537             :     // in mark-bit initialization. See MemoryChunk::Initialize for the
     538             :     // corresponding release store.
     539             :     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
     540             :     CHECK_NOT_NULL(chunk->synchronized_heap());
     541             : #endif
     542  2767857706 :     if (marking_state_.WhiteToGrey(object)) {
     543             :       shared_.Push(object);
     544             :     }
     545  2766783030 :   }
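
WhiteToGrey is the tri-color transition that makes the push race-free: only the thread that wins the white-to-grey change pushes the object, so it enters the worklist exactly once. A minimal tri-color sketch with one atomic color byte per object (V8 actually keeps two mark bits per object in a page-level bitmap):

    #include <atomic>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum Color : uint8_t { kWhite = 0, kGrey = 1, kBlack = 2 };

    struct Obj {
      std::atomic<uint8_t> color{kWhite};
    };

    // Only one thread can win this transition, so the push is not duplicated.
    bool WhiteToGrey(Obj* o) {
      uint8_t expected = kWhite;
      return o->color.compare_exchange_strong(expected, kGrey);
    }

    int main() {
      Obj o;
      std::vector<Obj*> worklist;
      if (WhiteToGrey(&o)) worklist.push_back(&o);  // First marker: pushes.
      if (WhiteToGrey(&o)) worklist.push_back(&o);  // Second marker: no-op.
      std::cout << worklist.size() << "\n";  // 1
    }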
     546             : 
     547    26708367 :   void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
     548             :     marking_state_.WhiteToGrey(descriptors);
     549    53542435 :     if (marking_state_.GreyToBlack(descriptors)) {
     550             :       VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
     551    12399024 :                     descriptors->GetDescriptorSlot(0));
     552             :     }
     553    26742463 :   }
     554             : 
     555             :  private:
     556             :   // Helper class for collecting in-object slot addresses and values.
     557    54964163 :   class SlotSnapshottingVisitor final : public ObjectVisitor {
     558             :    public:
     559             :     explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
     560    54729959 :         : slot_snapshot_(slot_snapshot) {
     561             :       slot_snapshot_->clear();
     562             :     }
     563             : 
     564   110489005 :     void VisitPointers(HeapObject host, ObjectSlot start,
     565             :                        ObjectSlot end) override {
     566   553777681 :       for (ObjectSlot p = start; p < end; ++p) {
     567   333708020 :         Object object = p.Relaxed_Load();
     568   332799671 :         slot_snapshot_->add(p, object);
     569             :       }
     570   109580656 :     }
     571             : 
     572           0 :     void VisitPointers(HeapObject host, MaybeObjectSlot start,
     573             :                        MaybeObjectSlot end) override {
     574             :       // This should never happen, because we don't use snapshotting for objects
     575             :       // which contain weak references.
     576           0 :       UNREACHABLE();
     577             :     }
     578             : 
     579           0 :     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
     580             :       // This should never happen, because snapshotting is performed only on
     581             :       // JSObjects (and derived classes).
     582           0 :       UNREACHABLE();
     583             :     }
     584             : 
     585           0 :     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
     586             :       // This should never happen, because snapshotting is performed only on
     587             :       // JSObjects (and derived classes).
     588           0 :       UNREACHABLE();
     589             :     }
     590             : 
     591          46 :     void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
     592             :                                  ObjectSlot end) override {
     593             :       DCHECK(host->IsWeakCell() || host->IsJSWeakRef());
     594          46 :     }
     595             : 
     596             :    private:
     597             :     SlotSnapshot* slot_snapshot_;
     598             :   };
     599             : 
     600             :   template <typename T>
     601             :   int VisitJSObjectSubclassFast(Map map, T object) {
     602             :     DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
     603             :     using TBodyDescriptor = typename T::FastBodyDescriptor;
     604    10645892 :     return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
     605             :   }
     606             : 
     607             :   template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
     608    47122053 :   int VisitJSObjectSubclass(Map map, T object) {
     609             :     int size = TBodyDescriptor::SizeOf(map, object);
     610    47122053 :     int used_size = map->UsedInstanceSize();
     611             :     DCHECK_LE(used_size, size);
     612             :     DCHECK_GE(used_size, T::kHeaderSize);
     613             :     return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
     614    47110674 :                                                           used_size, size);
     615             :   }
     616             : 
     617             :   template <typename T>
     618      356583 :   int VisitEmbedderTracingSubclass(Map map, T object) {
     619             :     DCHECK(object->IsApiWrapper());
     620      356583 :     int size = VisitJSObjectSubclass(map, object);
     621      356577 :     if (size && embedder_tracing_enabled_) {
     622             :       // Success: The object needs to be processed for embedder references on
     623             :       // the main thread.
     624             :       embedder_objects_.Push(object);
     625             :     }
     626      356577 :     return size;
     627             :   }
     628             : 
     629             :   template <typename T>
     630     8376539 :   int VisitLeftTrimmableArray(Map map, T object) {
     631             :     // The synchronized_length() function checks that the length is a Smi.
     632             :     // This is not necessarily the case if the array is being left-trimmed.
     633             :     Object length = object->unchecked_synchronized_length();
     634     8373823 :     if (!ShouldVisit(object)) return 0;
     635             :     // The cached length must be the actual length as the array is not black.
     636             :     // Left trimming marks the array black before over-writing the length.
      637             :     // Left trimming marks the array black before overwriting the length.
     638             :     int size = T::SizeFor(Smi::ToInt(length));
     639             :     VisitMapPointer(object, object->map_slot());
     640             :     T::BodyDescriptor::IterateBody(map, object, size, this);
     641     8236288 :     return size;
     642             :   }
     643             : 
     644             :   template <typename T>
     645             :   int VisitFullyWithSnapshot(Map map, T object) {
     646             :     using TBodyDescriptor = typename T::BodyDescriptor;
     647             :     int size = TBodyDescriptor::SizeOf(map, object);
     648             :     return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
     649     7623966 :                                                           size);
     650             :   }
     651             : 
     652             :   template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
     653    54734043 :   int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
     654             :     const SlotSnapshot& snapshot =
     655    54734043 :         MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
     656    54955551 :     if (!ShouldVisit(object)) return 0;
     657    54994042 :     VisitPointersInSnapshot(object, snapshot);
     658    36537596 :     return size;
     659             :   }
     660             : 
     661             :   template <typename T, typename TBodyDescriptor>
     662    54729959 :   const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
     663    54729959 :     SlotSnapshottingVisitor visitor(&slot_snapshot_);
     664             :     visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
     665      306635 :     TBodyDescriptor::IterateBody(map, object, size, &visitor);
     666    54964163 :     return slot_snapshot_;
     667             :   }
     668             : 
     669     5017959 :   void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
     670             :     MarkCompactCollector::RecordRelocSlotInfo info =
     671     5017959 :         MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
     672     5015600 :     if (info.should_record) {
     673      192248 :       MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
     674      192216 :       if (!data.typed_slots) {
     675         642 :         data.typed_slots.reset(new TypedSlots());
     676             :       }
     677      384432 :       data.typed_slots->Insert(info.slot_type, info.offset);
     678             :     }
     679     5015569 :   }
     680             : 
     681             :   ConcurrentMarking::MarkingWorklist::View shared_;
     682             :   WeakObjects* weak_objects_;
     683             :   ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
     684             :   ConcurrentMarkingState marking_state_;
     685             :   MemoryChunkDataMap* memory_chunk_data_;
     686             :   int task_id_;
     687             :   SlotSnapshot slot_snapshot_;
     688             :   bool embedder_tracing_enabled_;
     689             :   const unsigned mark_compact_epoch_;
     690             :   bool is_forced_gc_;
     691             : };
     692             : 
      693             : // Strings can change maps due to conversion to thin or external strings.
      694             : // Use an unchecked cast to avoid a data race in slow DCHECKs.
     695             : template <>
     696           0 : ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     697           0 :   return ConsString::unchecked_cast(object);
     698             : }
     699             : 
     700             : template <>
     701           0 : SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     702           0 :   return SlicedString::unchecked_cast(object);
     703             : }
     704             : 
     705             : template <>
     706           0 : ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     707           0 :   return ThinString::unchecked_cast(object);
     708             : }
     709             : 
     710             : template <>
     711           0 : SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     712           0 :   return SeqOneByteString::unchecked_cast(object);
     713             : }
     714             : 
     715             : template <>
     716           0 : SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     717           0 :   return SeqTwoByteString::unchecked_cast(object);
     718             : }
     719             : 
      720             : // A fixed array can become free space during left trimming.
     721             : template <>
     722           0 : FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
     723           0 :   return FixedArray::unchecked_cast(object);
     724             : }
     725             : 
     726             : class ConcurrentMarking::Task : public CancelableTask {
     727             :  public:
     728             :   Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
     729             :        TaskState* task_state, int task_id)
     730             :       : CancelableTask(isolate),
     731             :         concurrent_marking_(concurrent_marking),
     732             :         task_state_(task_state),
     733      561351 :         task_id_(task_id) {}
     734             : 
     735     1121591 :   ~Task() override = default;
     736             : 
     737             :  private:
     738             :   // v8::internal::CancelableTask overrides.
     739      447465 :   void RunInternal() override {
     740      447465 :     concurrent_marking_->Run(task_id_, task_state_);
     741      448082 :   }
     742             : 
     743             :   ConcurrentMarking* concurrent_marking_;
     744             :   TaskState* task_state_;
     745             :   int task_id_;
     746             :   DISALLOW_COPY_AND_ASSIGN(Task);
     747             : };
     748             : 
     749       61548 : ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
     750             :                                      MarkingWorklist* on_hold,
     751             :                                      WeakObjects* weak_objects,
     752             :                                      EmbedderTracingWorklist* embedder_objects)
     753             :     : heap_(heap),
     754             :       shared_(shared),
     755             :       on_hold_(on_hold),
     756             :       weak_objects_(weak_objects),
     757      615480 :       embedder_objects_(embedder_objects) {
      758             : // The runtime flag should be set only if the compile-time flag was set.
     759             : #ifndef V8_CONCURRENT_MARKING
     760             :   CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
     761             : #endif
     762       61548 : }
     763             : 
     764      447091 : void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     765     2236666 :   TRACE_BACKGROUND_GC(heap_->tracer(),
     766             :                       GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
      767             :   const size_t kBytesUntilInterruptCheck = 64 * KB;
      768             :   const int kObjectsUntilInterruptCheck = 1000;
     769             :   ConcurrentMarkingVisitor visitor(
     770      895054 :       shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
     771      447527 :       task_id, heap_->local_embedder_heap_tracer()->InUse(),
     772      895054 :       task_state->mark_compact_epoch, task_state->is_forced_gc);
     773             :   double time_ms;
     774             :   size_t marked_bytes = 0;
     775      447527 :   if (FLAG_trace_concurrent_marking) {
     776             :     heap_->isolate()->PrintWithTimestamp(
     777           0 :         "Starting concurrent marking task %d\n", task_id);
     778             :   }
     779             :   bool ephemeron_marked = false;
     780             : 
     781             :   {
     782             :     TimedScope scope(&time_ms);
     783             : 
     784             :     {
     785      446342 :       Ephemeron ephemeron;
     786             : 
     787      446443 :       while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
     788         101 :         if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
     789             :           ephemeron_marked = true;
     790             :         }
     791             :       }
     792             :     }
      793   413390597 :              objects_processed < kObjectsUntilInterruptCheck) {
     794             :     bool done = false;
     795     1365347 :     while (!done) {
     796             :       size_t current_marked_bytes = 0;
     797             :       int objects_processed = 0;
     798  1239307259 :       while (current_marked_bytes < kBytesUntilInterruptCheck &&
     799   413390597 :              objects_processed < kObjectsUntilInterrupCheck) {
     800   412919317 :         HeapObject object;
     801   412919317 :         if (!shared_->Pop(task_id, &object)) {
     802             :           done = true;
     803      447210 :           break;
     804             :         }
     805   412866090 :         objects_processed++;
     806             :         // The order of the two loads is important.
     807   412866090 :         Address new_space_top = heap_->new_space()->original_top_acquire();
     808   412866090 :         Address new_space_limit = heap_->new_space()->original_limit_relaxed();
     809   412866090 :         Address new_large_object = heap_->new_lo_space()->pending_object();
     810             :         Address addr = object->address();
     811   412866090 :         if ((new_space_top <= addr && addr < new_space_limit) ||
     812             :             addr == new_large_object) {
     813           0 :           on_hold_->Push(task_id, object);
     814             :         } else {
     815             :           Map map = object->synchronized_map();
     816   405368829 :           current_marked_bytes += visitor.Visit(map, object);
     817             :         }
     818             :       }
     819      918490 :       marked_bytes += current_marked_bytes;
     820      918490 :       base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
     821             :                                                 marked_bytes);
     822      918490 :       if (task_state->preemption_request) {
     823        1103 :         TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
     824             :                      "ConcurrentMarking::Run Preempted");
     825             :         break;
     826             :       }
     827             :     }
     828             : 
     829      501183 :     if (done) {
     830      447051 :       Ephemeron ephemeron;
     831             : 
     832      447056 :       while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
     833           5 :         if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
     834             :           ephemeron_marked = true;
     835             :         }
     836             :       }
     837             :     }
     838             : 
     839      501423 :     shared_->FlushToGlobal(task_id);
     840      447458 :     on_hold_->FlushToGlobal(task_id);
     841      447507 :     embedder_objects_->FlushToGlobal(task_id);
     842             : 
     843      447534 :     weak_objects_->transition_arrays.FlushToGlobal(task_id);
     844      447594 :     weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
     845      447529 :     weak_objects_->current_ephemerons.FlushToGlobal(task_id);
     846      447349 :     weak_objects_->next_ephemerons.FlushToGlobal(task_id);
     847      447390 :     weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
     848      447154 :     weak_objects_->weak_references.FlushToGlobal(task_id);
     849      447390 :     weak_objects_->js_weak_refs.FlushToGlobal(task_id);
     850      447476 :     weak_objects_->weak_cells.FlushToGlobal(task_id);
     851      447424 :     weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
     852      447497 :     weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
     853      447404 :     weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
     854      447338 :     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     855             :     total_marked_bytes_ += marked_bytes;
     856             : 
     857      447338 :     if (ephemeron_marked) {
     858             :       set_ephemeron_marked(true);
     859             :     }
     860             : 
     861             :     {
     862      447338 :       base::MutexGuard guard(&pending_lock_);
     863      448238 :       is_pending_[task_id] = false;
     864      448238 :       --pending_task_count_;
     865      448238 :       pending_condition_.NotifyAll();
     866             :     }
     867             :   }
     868      447478 :   if (FLAG_trace_concurrent_marking) {
     869           0 :     heap_->isolate()->PrintWithTimestamp(
     870             :         "Task %d concurrently marked %dKB in %.2fms\n", task_id,
     871           0 :         static_cast<int>(marked_bytes / KB), time_ms);
     872             :   }
     873      448154 : }
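
The loop above drains the shared worklist in bounded slices: roughly every 64 KB of visited payload or 1000 objects, the task publishes its byte count and checks the preemption flag, so the main thread can reach a safepoint quickly. A condensed sketch of that budgeted drain loop, with a plain deque standing in for V8's work-stealing worklist:

    #include <atomic>
    #include <deque>
    #include <iostream>

    struct Worklist {
      std::deque<int> items;  // Payload sizes, standing in for objects.
      bool Pop(int* out) {
        if (items.empty()) return false;
        *out = items.front();
        items.pop_front();
        return true;
      }
    };

    void Run(Worklist* shared, std::atomic<bool>* preempt,
             std::atomic<size_t>* published) {
      const size_t kBytesBudget = 64 * 1024;
      const int kObjectsBudget = 1000;
      size_t marked = 0;
      bool done = false;
      while (!done) {
        size_t bytes = 0;
        int objects = 0;
        while (bytes < kBytesBudget && objects < kObjectsBudget) {
          int size;
          if (!shared->Pop(&size)) { done = true; break; }
          bytes += size;  // "Visit" the object.
          ++objects;
        }
        published->store(marked += bytes, std::memory_order_relaxed);
        if (preempt->load()) break;  // Main thread asked us to yield.
      }
    }

    int main() {
      Worklist shared;
      for (int i = 0; i < 5000; ++i) shared.items.push_back(100);
      std::atomic<bool> preempt{false};
      std::atomic<size_t> published{0};
      Run(&shared, &preempt, &published);
      std::cout << published.load() << " bytes marked\n";  // 500000
    }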
     874             : 
     875       80193 : void ConcurrentMarking::ScheduleTasks() {
     876             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     877             :   DCHECK(!heap_->IsTearingDown());
     878       80193 :   base::MutexGuard guard(&pending_lock_);
     879             :   DCHECK_EQ(0, pending_task_count_);
     880       80193 :   if (task_count_ == 0) {
     881             :     static const int num_cores =
     882       16554 :         V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
     883             : #if defined(V8_OS_MACOSX)
      884             :     // Mac OS X 10.11 and prior seem to have trouble when doing concurrent
     885             :     // marking on competing hyper-threads (regresses Octane/Splay). As such,
     886             :     // only use num_cores/2, leaving one of those for the main thread.
     887             :     // TODO(ulan): Use all cores on Mac 10.12+.
     888             :     task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
      889             : #else   // defined(V8_OS_MACOSX)
     890             :     // On other platforms use all logical cores, leaving one for the main
     891             :     // thread.
     892       33108 :     task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
      893             : #endif  // defined(V8_OS_MACOSX)
     894             :   }
     895             :   // Task id 0 is for the main thread.
     896      641544 :   for (int i = 1; i <= task_count_; i++) {
     897      561351 :     if (!is_pending_[i]) {
     898      561351 :       if (FLAG_trace_concurrent_marking) {
     899           0 :         heap_->isolate()->PrintWithTimestamp(
     900           0 :             "Scheduling concurrent marking task %d\n", i);
     901             :       }
     902      561351 :       task_state_[i].preemption_request = false;
     903      561351 :       task_state_[i].mark_compact_epoch =
     904     1122702 :           heap_->mark_compact_collector()->epoch();
     905      561351 :       task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
     906      561351 :       is_pending_[i] = true;
     907      561351 :       ++pending_task_count_;
     908             :       auto task =
     909     1122702 :           base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
     910     1122702 :       cancelable_id_[i] = task->id();
     911     1684053 :       V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
     912             :     }
     913             :   }
     914             :   DCHECK_EQ(task_count_, pending_task_count_);
     915       80193 : }
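
The task count is computed once from the platform's reported worker-thread count, capped at kMaxTasks and leaving a core for the main thread (and halved on older macOS). A sketch of that heuristic using std::thread::hardware_concurrency in place of V8's Platform query; the kMaxTasks value here is illustrative:

    #include <algorithm>
    #include <iostream>
    #include <thread>

    constexpr int kMaxTasks = 7;  // Illustrative cap, not necessarily V8's.

    int ComputeTaskCount(bool is_old_macos) {
      // The +1 mirrors NumberOfWorkerThreads() + 1 accounting above.
      int num_cores = static_cast<int>(std::thread::hardware_concurrency()) + 1;
      int candidate = is_old_macos ? num_cores / 2 - 1 : num_cores - 1;
      return std::max(1, std::min(kMaxTasks, candidate));
    }

    int main() {
      std::cout << "concurrent marking tasks: " << ComputeTaskCount(false) << "\n";
    }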
     916             : 
     917     1452586 : void ConcurrentMarking::RescheduleTasksIfNeeded() {
     918             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     919     1452586 :   if (heap_->IsTearingDown()) return;
     920             :   {
     921     1452586 :     base::MutexGuard guard(&pending_lock_);
     922     1452586 :     if (pending_task_count_ > 0) return;
     923             :   }
     924     4158121 :   if (!shared_->IsGlobalPoolEmpty() ||
     925     4102885 :       !weak_objects_->current_ephemerons.IsEmpty() ||
     926     1349205 :       !weak_objects_->discovered_ephemerons.IsEmpty()) {
     927       55253 :     ScheduleTasks();
     928             :   }
     929             : }
     930             : 
     931      239909 : bool ConcurrentMarking::Stop(StopRequest stop_request) {
     932             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     933      239909 :   base::MutexGuard guard(&pending_lock_);
     934             : 
     935      239909 :   if (pending_task_count_ == 0) return false;
     936             : 
     937       39252 :   if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
     938             :     CancelableTaskManager* task_manager =
     939       39232 :         heap_->isolate()->cancelable_task_manager();
     940      588480 :     for (int i = 1; i <= task_count_; i++) {
     941      274624 :       if (is_pending_[i]) {
     942      204859 :         if (task_manager->TryAbort(cancelable_id_[i]) ==
     943             :             TryAbortResult::kTaskAborted) {
     944       99214 :           is_pending_[i] = false;
     945       99214 :           --pending_task_count_;
     946      105645 :         } else if (stop_request == StopRequest::PREEMPT_TASKS) {
     947         419 :           task_state_[i].preemption_request = true;
     948             :         }
     949             :       }
     950             :     }
     951             :   }
     952       72942 :   while (pending_task_count_ > 0) {
     953       72942 :     pending_condition_.Wait(&pending_lock_);
     954             :   }
     955             :   for (int i = 1; i <= task_count_; i++) {
     956             :     DCHECK(!is_pending_[i]);
     957             :   }
     958             :   return true;
     959             : }
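
Stop() first tries to abort tasks that have not started, then, for PREEMPT_TASKS, raises each task's preemption flag and blocks on a condition variable until the running tasks observe the flag and retire. A small sketch of that flag-plus-condition-variable handshake for a single worker:

    #include <atomic>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex pending_lock;
    std::condition_variable pending_condition;
    int pending_task_count = 0;
    std::atomic<bool> preemption_request{false};

    void Worker() {
      while (!preemption_request.load()) {
        std::this_thread::yield();  // Stand-in for marking a slice of objects.
      }
      std::lock_guard<std::mutex> guard(pending_lock);
      --pending_task_count;
      pending_condition.notify_all();
    }

    int main() {
      {
        std::lock_guard<std::mutex> guard(pending_lock);
        pending_task_count = 1;
      }
      std::thread task(Worker);
      preemption_request.store(true);  // Ask the worker to stop at its next check.
      {
        std::unique_lock<std::mutex> guard(pending_lock);
        pending_condition.wait(guard, [] { return pending_task_count == 0; });
      }
      task.join();
      std::cout << "all tasks stopped\n";
    }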
     960             : 
     961           0 : bool ConcurrentMarking::IsStopped() {
     962           0 :   if (!FLAG_concurrent_marking) return true;
     963             : 
     964           0 :   base::MutexGuard guard(&pending_lock_);
     965           0 :   return pending_task_count_ == 0;
     966             : }
     967             : 
     968      219621 : void ConcurrentMarking::FlushMemoryChunkData(
     969             :     MajorNonAtomicMarkingState* marking_state) {
     970             :   DCHECK_EQ(pending_task_count_, 0);
     971     3293727 :   for (int i = 1; i <= task_count_; i++) {
     972     1537053 :     MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
     973     2479234 :     for (auto& pair : memory_chunk_data) {
     974             :       // ClearLiveness sets the live bytes to zero.
      975             :       // Pages with zero live bytes might already be unmapped.
     976      942181 :       MemoryChunk* memory_chunk = pair.first;
     977             :       MemoryChunkData& data = pair.second;
     978      942181 :       if (data.live_bytes) {
     979             :         marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
     980             :       }
     981      942181 :       if (data.typed_slots) {
     982         639 :         RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
     983         639 :                                               std::move(data.typed_slots));
     984             :       }
     985             :     }
     986             :     memory_chunk_data.clear();
     987     1537053 :     task_state_[i].marked_bytes = 0;
     988             :   }
     989             :   total_marked_bytes_ = 0;
     990      219621 : }
     991             : 
     992      900444 : void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
     993     7425354 :   for (int i = 1; i <= task_count_; i++) {
     994             :     auto it = task_state_[i].memory_chunk_data.find(chunk);
     995     3262455 :     if (it != task_state_[i].memory_chunk_data.end()) {
     996        1717 :       it->second.live_bytes = 0;
     997             :       it->second.typed_slots.reset();
     998             :     }
     999             :   }
    1000      900444 : }
    1001             : 
    1002     1037873 : size_t ConcurrentMarking::TotalMarkedBytes() {
    1003             :   size_t result = 0;
    1004    15568095 :   for (int i = 1; i <= task_count_; i++) {
    1005             :     result +=
    1006    14530222 :         base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
    1007             :   }
    1008     1037873 :   result += total_marked_bytes_;
    1009     1037873 :   return result;
    1010             : }
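
TotalMarkedBytes() sums the live per-task counters (relaxed loads) plus the bytes of tasks that already flushed; the caller only needs an approximate, monotonically growing figure for pacing decisions, not an exact snapshot. A sketch of that aggregation:

    #include <atomic>
    #include <iostream>

    constexpr int kMaxTasks = 4;
    std::atomic<size_t> task_marked_bytes[kMaxTasks + 1];  // Slot 0: main thread.
    size_t total_marked_bytes = 0;  // Bytes from tasks that already finished.

    // Relaxed loads suffice: each counter is independently consistent, and a
    // slightly stale sum is fine for heuristics.
    size_t TotalMarkedBytes(int task_count) {
      size_t result = total_marked_bytes;
      for (int i = 1; i <= task_count; i++)
        result += task_marked_bytes[i].load(std::memory_order_relaxed);
      return result;
    }

    int main() {
      task_marked_bytes[1].store(4096, std::memory_order_relaxed);
      task_marked_bytes[2].store(8192, std::memory_order_relaxed);
      total_marked_bytes = 1024;
      std::cout << TotalMarkedBytes(2) << "\n";  // 13312
    }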
    1011             : 
    1012       20973 : ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    1013             :     : concurrent_marking_(concurrent_marking),
    1014       41231 :       resume_on_exit_(FLAG_concurrent_marking &&
    1015       20258 :                       concurrent_marking_->Stop(
    1016       41946 :                           ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
    1017             :   DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
    1018       20973 : }
    1019             : 
    1020       41946 : ConcurrentMarking::PauseScope::~PauseScope() {
    1021       20973 :   if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
    1022       20973 : }
    1023             : 
    1024             : }  // namespace internal
    1025      120216 : }  // namespace v8

Generated by: LCOV version 1.10