LCOV - code coverage report
Current view: top level - src/heap - concurrent-marking.cc (source / functions) Hit Total Coverage
Test: app.info Lines: 348 364 95.6 %
Date: 2019-01-20 Functions: 101 110 91.8 %

          Line data    Source code
       1             : // Copyright 2017 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/concurrent-marking.h"
       6             : 
       7             : #include <stack>
       8             : #include <unordered_map>
       9             : 
      10             : #include "include/v8config.h"
      11             : #include "src/base/template-utils.h"
      12             : #include "src/heap/gc-tracer.h"
      13             : #include "src/heap/heap-inl.h"
      14             : #include "src/heap/heap.h"
      15             : #include "src/heap/mark-compact-inl.h"
      16             : #include "src/heap/mark-compact.h"
      17             : #include "src/heap/marking.h"
      18             : #include "src/heap/objects-visiting-inl.h"
      19             : #include "src/heap/objects-visiting.h"
      20             : #include "src/heap/worklist.h"
      21             : #include "src/isolate.h"
      22             : #include "src/objects/hash-table-inl.h"
      23             : #include "src/objects/slots-inl.h"
      24             : #include "src/utils-inl.h"
      25             : #include "src/utils.h"
      26             : #include "src/v8.h"
      27             : 
      28             : namespace v8 {
      29             : namespace internal {
      30             : 
      31             : class ConcurrentMarkingState final
      32             :     : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
      33             :  public:
      34             :   explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
      35      502797 :       : memory_chunk_data_(memory_chunk_data) {}
      36             : 
      37  3708556837 :   Bitmap* bitmap(const MemoryChunk* chunk) {
      38             :     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
      39             :                   reinterpret_cast<intptr_t>(chunk),
      40             :               MemoryChunk::kMarkBitmapOffset);
      41  3708556837 :     return chunk->marking_bitmap_;
      42             :   }
      43             : 
      44   517152437 :   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
      45  1033142165 :     (*memory_chunk_data_)[chunk].live_bytes += by;
      46   515989728 :   }
      47             : 
      48             :   // The live_bytes and SetLiveBytes methods of the marking state are
      49             :   // not used by the concurrent marker.
      50             : 
      51             :  private:
      52             :   MemoryChunkDataMap* memory_chunk_data_;
      53             : };
      54             : 
      55             : // Helper class for storing in-object slot addresses and values.
      56             : class SlotSnapshot {
      57             :  public:
      58   127029462 :   SlotSnapshot() : number_of_slots_(0) {}
      59             :   int number_of_slots() const { return number_of_slots_; }
      60   372455610 :   ObjectSlot slot(int i) const { return snapshot_[i].first; }
      61   372455610 :   Object value(int i) const { return snapshot_[i].second; }
      62    59931701 :   void clear() { number_of_slots_ = 0; }
      63             :   void add(ObjectSlot slot, Object value) {
      64   376031790 :     snapshot_[number_of_slots_++] = {slot, value};
      65             :   }
      66             : 
      67             :  private:
      68             :   static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
      69             :   int number_of_slots_;
      70             :   std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
      71             :   DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
      72             : };
      73             : 
      74      502739 : class ConcurrentMarkingVisitor final
      75             :     : public HeapVisitor<int, ConcurrentMarkingVisitor> {
      76             :  public:
      77             :   using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
      78             : 
      79             :   explicit ConcurrentMarkingVisitor(
      80             :       ConcurrentMarking::MarkingWorklist* shared,
      81             :       MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
      82             :       ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
      83             :       bool embedder_tracing_enabled, unsigned mark_compact_epoch,
      84             :       bool is_forced_gc)
      85             :       : shared_(shared, task_id),
      86             :         weak_objects_(weak_objects),
      87             :         embedder_objects_(embedder_objects, task_id),
      88             :         marking_state_(memory_chunk_data),
      89             :         memory_chunk_data_(memory_chunk_data),
      90             :         task_id_(task_id),
      91             :         embedder_tracing_enabled_(embedder_tracing_enabled),
      92             :         mark_compact_epoch_(mark_compact_epoch),
      93     2011188 :         is_forced_gc_(is_forced_gc) {}
      94             : 
      95             :   template <typename T>
      96             :   static V8_INLINE T Cast(HeapObject object) {
      97             :     return T::cast(object);
      98             :   }
      99             : 
     100   501398508 :   bool ShouldVisit(HeapObject object) {
     101  1005346703 :     return marking_state_.GreyToBlack(object);
     102             :   }
     103             : 
     104             :   bool AllowDefaultJSObjectVisit() { return false; }
     105             : 
     106             :   template <typename THeapObjectSlot>
     107  2773460545 :   void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
     108             :                                HeapObject heap_object) {
     109  2773460545 :     MarkObject(heap_object);
     110             :     MarkCompactCollector::RecordSlot(host, slot, heap_object);
     111  2776913283 :   }
     112             : 
     113             :   template <typename THeapObjectSlot>
     114    82288567 :   void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
     115             :                              HeapObject heap_object) {
     116             : #ifdef THREAD_SANITIZER
     117             :     // Perform a dummy acquire load to tell TSAN that there is no data race
     118             :     // in mark-bit initialization. See MemoryChunk::Initialize for the
     119             :     // corresponding release store.
     120             :     MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
     121             :     CHECK_NOT_NULL(chunk->synchronized_heap());
     122             : #endif
     123    82433248 :     if (marking_state_.IsBlackOrGrey(heap_object)) {
     124             :       // Weak references with live values are directly processed here to
     125             :       // reduce the processing time of weak cells during the main GC
     126             :       // pause.
     127             :       MarkCompactCollector::RecordSlot(host, slot, heap_object);
     128             :     } else {
     129             :       // If we do not know about liveness of the value, we have to process
     130             :       // the reference when we know the liveness of the whole transitive
     131             :       // closure.
     132    22360838 :       weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
     133             :     }
     134    82365841 :   }
     135             : 
     136   832377073 :   void VisitPointers(HeapObject host, ObjectSlot start,
     137             :                      ObjectSlot end) override {
     138             :     VisitPointersImpl(host, start, end);
     139   828606816 :   }
     140             : 
     141    56554853 :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
     142             :                      MaybeObjectSlot end) override {
     143             :     VisitPointersImpl(host, start, end);
     144    56335622 :   }
     145             : 
     146             :   template <typename TSlot>
     147             :   V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
     148             :     using THeapObjectSlot = typename TSlot::THeapObjectSlot;
     149  4206325308 :     for (TSlot slot = start; slot < end; ++slot) {
     150  3312767016 :       typename TSlot::TObject object = slot.Relaxed_Load();
     151  3314969306 :       HeapObject heap_object;
     152  3314969306 :       if (object.GetHeapObjectIfStrong(&heap_object)) {
     153             :         // If the reference changes concurrently from strong to weak, the write
     154             :         // barrier will treat the weak reference as strong, so we won't miss the
     155             :         // weak reference.
     156  2789360114 :         ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
     157   241886287 :       } else if (TSlot::kCanBeWeak &&
     158             :                  object.GetHeapObjectIfWeak(&heap_object)) {
     159    82298472 :         ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
     160             :       }
     161             :     }
     162             :   }
     163             : 
     164             :   // Weak list pointers should be ignored during marking. The lists are
     165             :   // reconstructed after GC.
     166    48351978 :   void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
     167    48351978 :                                ObjectSlot end) final {}
     168             : 
     169     4098387 :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
     170             :     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     171             :     HeapObject object = rinfo->target_object();
     172     4098669 :     RecordRelocSlot(host, rinfo, object);
     173     4097052 :     if (!marking_state_.IsBlackOrGrey(object)) {
     174      417527 :       if (host->IsWeakObject(object)) {
     175             :         weak_objects_->weak_objects_in_code.Push(task_id_,
     176      155037 :                                                  std::make_pair(object, host));
     177             :       } else {
     178      262505 :         MarkObject(object);
     179             :       }
     180             :     }
     181     4097073 :   }
     182             : 
     183       86230 :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
     184             :     DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
     185       86241 :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     186       86240 :     RecordRelocSlot(host, rinfo, target);
     187       86232 :     MarkObject(target);
     188       86231 :   }
     189             : 
     190   492265769 :   void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
     191   864705772 :     for (int i = 0; i < snapshot.number_of_slots(); i++) {
     192             :       ObjectSlot slot = snapshot.slot(i);
     193   372455610 :       Object object = snapshot.value(i);
     194             :       DCHECK(!HasWeakHeapObjectTag(object));
     195   379887268 :       if (!object->IsHeapObject()) continue;
     196             :       HeapObject heap_object = HeapObject::cast(object);
     197   365503032 :       MarkObject(heap_object);
     198             :       MarkCompactCollector::RecordSlot(host, slot, heap_object);
     199             :     }
     200    59897276 :   }
     201             : 
     202             :   // ===========================================================================
     203             :   // JS object =================================================================
     204             :   // ===========================================================================
     205             : 
     206       79440 :   int VisitJSObject(Map map, JSObject object) {
     207       79440 :     return VisitJSObjectSubclass(map, object);
     208             :   }
     209             : 
     210    55648867 :   int VisitJSObjectFast(Map map, JSObject object) {
     211    55636339 :     return VisitJSObjectSubclassFast(map, object);
     212             :   }
     213             : 
     214       25473 :   int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
     215       25473 :     return VisitJSObjectSubclass(map, object);
     216             :   }
     217             : 
     218          45 :   int VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
     219          45 :     int size = VisitJSObjectSubclass(map, weak_ref);
     220          45 :     if (size == 0) {
     221             :       return 0;
     222             :     }
     223          90 :     if (weak_ref->target()->IsHeapObject()) {
     224          45 :       HeapObject target = HeapObject::cast(weak_ref->target());
     225          45 :       if (marking_state_.IsBlackOrGrey(target)) {
     226             :         // Record the slot inside the JSWeakRef, since the
     227             :         // VisitJSObjectSubclass above didn't visit it.
     228             :         ObjectSlot slot =
     229             :             HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
     230             :         MarkCompactCollector::RecordSlot(weak_ref, slot, target);
     231             :       } else {
     232             :         // JSWeakRef points to a potentially dead object. We have to process
     233             :         // them when we know the liveness of the whole transitive closure.
     234          29 :         weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
     235             :       }
     236             :     }
     237          45 :     return size;
     238             :   }
     239             : 
     240         228 :   int VisitJSWeakCell(Map map, JSWeakCell weak_cell) {
     241         228 :     int size = VisitJSObjectSubclass(map, weak_cell);
     242         228 :     if (size == 0) {
     243             :       return 0;
     244             :     }
     245             : 
     246         456 :     if (weak_cell->target()->IsHeapObject()) {
     247         228 :       HeapObject target = HeapObject::cast(weak_cell->target());
     248         228 :       if (marking_state_.IsBlackOrGrey(target)) {
     249             :         // Record the slot inside the JSWeakCell, since the
     250             :         // VisitJSObjectSubclass above didn't visit it.
     251             :         ObjectSlot slot =
     252             :             HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
     253             :         MarkCompactCollector::RecordSlot(weak_cell, slot, target);
     254             :       } else {
     255             :         // JSWeakCell points to a potentially dead object. We have to process
     256             :         // them when we know the liveness of the whole transitive closure.
     257         151 :         weak_objects_->js_weak_cells.Push(task_id_, weak_cell);
     258             :       }
     259             :     }
     260         228 :     return size;
     261             :   }
     262             : 
     263             :   // Some JS objects can carry back links to embedders that contain information
     264             :   // relevant to the garbage collectors.
     265             : 
     266       81821 :   int VisitJSApiObject(Map map, JSObject object) {
     267       81821 :     return VisitEmbedderTracingSubclass(map, object);
     268             :   }
     269             : 
     270      277194 :   int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
     271      277194 :     return VisitEmbedderTracingSubclass(map, object);
     272             :   }
     273             : 
     274          46 :   int VisitJSDataView(Map map, JSDataView object) {
     275          46 :     return VisitEmbedderTracingSubclass(map, object);
     276             :   }
     277             : 
     278       51913 :   int VisitJSTypedArray(Map map, JSTypedArray object) {
     279       51913 :     return VisitEmbedderTracingSubclass(map, object);
     280             :   }
     281             : 
     282             :   // ===========================================================================
     283             :   // Strings with pointers =====================================================
     284             :   // ===========================================================================
     285             : 
     286     3637278 :   int VisitConsString(Map map, ConsString object) {
     287     3637200 :     return VisitFullyWithSnapshot(map, object);
     288             :   }
     289             : 
     290       57066 :   int VisitSlicedString(Map map, SlicedString object) {
     291       57024 :     return VisitFullyWithSnapshot(map, object);
     292             :   }
     293             : 
     294       37699 :   int VisitThinString(Map map, ThinString object) {
     295       37720 :     return VisitFullyWithSnapshot(map, object);
     296             :   }
     297             : 
     298             :   // ===========================================================================
     299             :   // Strings without pointers ==================================================
     300             :   // ===========================================================================
     301             : 
     302    47407391 :   int VisitSeqOneByteString(Map map, SeqOneByteString object) {
     303    47407391 :     if (!ShouldVisit(object)) return 0;
     304             :     VisitMapPointer(object, object->map_slot());
     305             :     return SeqOneByteString::SizeFor(object->synchronized_length());
     306             :   }
     307             : 
     308    42357075 :   int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
     309    42357075 :     if (!ShouldVisit(object)) return 0;
     310             :     VisitMapPointer(object, object->map_slot());
     311             :     return SeqTwoByteString::SizeFor(object->synchronized_length());
     312             :   }
     313             : 
     314             :   // ===========================================================================
     315             :   // Fixed array object ========================================================
     316             :   // ===========================================================================
     317             : 
     318       14968 :   int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
     319             :                                      MemoryChunk* chunk) {
     320             :     // The concurrent marker can process larger chunks than the main thread
     321             :     // marker.
     322             :     const int kProgressBarScanningChunk =
     323             :         RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
     324             :     DCHECK(marking_state_.IsBlackOrGrey(object));
     325       14968 :     marking_state_.GreyToBlack(object);
     326             :     int size = FixedArray::BodyDescriptor::SizeOf(map, object);
     327             :     int start =
     328             :         Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
     329       14968 :     int end = Min(size, start + kProgressBarScanningChunk);
     330       14968 :     if (start < end) {
     331             :       VisitPointers(object, HeapObject::RawField(object, start),
     332       14968 :                     HeapObject::RawField(object, end));
     333             :       chunk->set_progress_bar(end);
     334       14968 :       if (end < size) {
     335             :         // The object can be pushed back onto the marking worklist only after
     336             :         // progress bar was updated.
     337       13562 :         shared_.Push(object);
     338             :       }
     339             :     }
     340       14968 :     return end - start;
     341             :   }
     342             : 
     343    11561902 :   int VisitFixedArray(Map map, FixedArray object) {
     344             :     // Arrays with the progress bar are not left-trimmable because they reside
     345             :     // in the large object space.
     346             :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
     347             :     return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
     348             :                ? VisitFixedArrayWithProgressBar(map, object, chunk)
     349    11561902 :                : VisitLeftTrimmableArray(map, object);
     350             :   }
     351             : 
     352       82046 :   int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
     353       82046 :     return VisitLeftTrimmableArray(map, object);
     354             :   }
     355             : 
     356             :   // ===========================================================================
     357             :   // Side-effectful visitation.
     358             :   // ===========================================================================
     359             : 
     360    48622111 :   int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
     361    48622111 :     if (!ShouldVisit(shared_info)) return 0;
     362             : 
     363             :     int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
     364             :     VisitMapPointer(shared_info, shared_info->map_slot());
     365             :     SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
     366    48432055 :                                                     this);
     367             : 
     368             :     // If the SharedFunctionInfo has old bytecode, mark it as flushable,
     369             :     // otherwise visit the function data field strongly.
     370    48483511 :     if (shared_info->ShouldFlushBytecode()) {
     371          55 :       weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
     372             :     } else {
     373             :       VisitPointer(shared_info, shared_info->RawField(
     374    97005124 :                                     SharedFunctionInfo::kFunctionDataOffset));
     375             :     }
     376    48513772 :     return size;
     377             :   }
     378             : 
     379     3687631 :   int VisitBytecodeArray(Map map, BytecodeArray object) {
     380     3687631 :     if (!ShouldVisit(object)) return 0;
     381     3688685 :     int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
     382             :     VisitMapPointer(object, object->map_slot());
     383     3679954 :     BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
     384     3696634 :     if (!is_forced_gc_) {
     385     3394539 :       object->MakeOlder();
     386             :     }
     387             :     return size;
     388             :   }
     389             : 
     390    29289735 :   int VisitMap(Map meta_map, Map map) {
     391    29289735 :     if (!ShouldVisit(map)) return 0;
     392             :     int size = Map::BodyDescriptor::SizeOf(meta_map, map);
     393    29412603 :     if (map->CanTransition()) {
     394             :       // Maps that can transition share their descriptor arrays and require
     395             :       // special visiting logic to avoid memory leaks.
     396             :       // Since descriptor arrays are potentially shared, ensure that only the
     397             :       // descriptors that belong to this map are marked. The first time a
     398             :       // non-empty descriptor array is marked, its header is also visited. The
     399             :       // slot holding the descriptor array will be implicitly recorded when the
     400             :       // pointer fields of this map are visited.
     401    29236183 :       DescriptorArray descriptors = map->synchronized_instance_descriptors();
     402    29256289 :       MarkDescriptorArrayBlack(descriptors);
     403    29056566 :       int number_of_own_descriptors = map->NumberOfOwnDescriptors();
     404    29056566 :       if (number_of_own_descriptors) {
     405             :         // It is possible that the concurrent marker observes the
     406             :         // number_of_own_descriptors out of sync with the descriptors. In that
     407             :         // case the marking write barrier for the descriptor array will ensure
     408             :         // that all required descriptors are marked. The concurrent marker
     409             :         // just should avoid crashing in that case. That's why we need the
     410             :         // std::min<int>() below.
     411             :         VisitDescriptors(descriptors,
     412             :                          std::min<int>(number_of_own_descriptors,
     413    43605054 :                                        descriptors->number_of_descriptors()));
     414             :       }
     415             :       // Mark the pointer fields of the Map. Since the transitions array has
     416             :       // been marked already, it is fine that one of these fields contains a
     417             :       // pointer to it.
     418             :     }
     419    29248214 :     Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
     420    29124496 :     return size;
     421             :   }
     422             : 
     423    21917966 :   void VisitDescriptors(DescriptorArray descriptor_array,
     424             :                         int number_of_own_descriptors) {
     425    21917966 :     int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
     426             :     int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
     427    21917966 :         mark_compact_epoch_, new_marked);
     428    21913563 :     if (old_marked < new_marked) {
     429             :       VisitPointers(
     430             :           descriptor_array,
     431    14258834 :           MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
     432    14258834 :           MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
     433             :     }
     434    21904616 :   }
     435             : 
     436      143331 :   int VisitDescriptorArray(Map map, DescriptorArray array) {
     437      143331 :     if (!ShouldVisit(array)) return 0;
     438             :     VisitMapPointer(array, array->map_slot());
     439             :     int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
     440             :     VisitPointers(array, array->GetFirstPointerSlot(),
     441       83744 :                   array->GetDescriptorSlot(0));
     442       83749 :     VisitDescriptors(array, array->number_of_descriptors());
     443       83767 :     return size;
     444             :   }
     445             : 
     446      518825 :   int VisitTransitionArray(Map map, TransitionArray array) {
     447      518825 :     if (!ShouldVisit(array)) return 0;
     448             :     VisitMapPointer(array, array->map_slot());
     449             :     int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
     450             :     TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
     451      518821 :     weak_objects_->transition_arrays.Push(task_id_, array);
     452      518816 :     return size;
     453             :   }
     454             : 
     455       31909 :   int VisitJSWeakCollection(Map map, JSWeakCollection object) {
     456       31909 :     return VisitJSObjectSubclass(map, object);
     457             :   }
     458             : 
     459       31730 :   int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
     460       31730 :     if (!ShouldVisit(table)) return 0;
     461       31893 :     weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
     462             : 
     463      157921 :     for (int i = 0; i < table->Capacity(); i++) {
     464             :       ObjectSlot key_slot =
     465             :           table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
     466      126904 :       HeapObject key = HeapObject::cast(table->KeyAt(i));
     467             :       MarkCompactCollector::RecordSlot(table, key_slot, key);
     468             : 
     469             :       ObjectSlot value_slot =
     470             :           table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
     471             : 
     472      126638 :       if (marking_state_.IsBlackOrGrey(key)) {
     473      126568 :         VisitPointer(table, value_slot);
     474             : 
     475             :       } else {
     476          70 :         Object value_obj = table->ValueAt(i);
     477             : 
     478          70 :         if (value_obj->IsHeapObject()) {
     479             :           HeapObject value = HeapObject::cast(value_obj);
     480             :           MarkCompactCollector::RecordSlot(table, value_slot, value);
     481             : 
     482             :           // Revisit ephemerons with both key and value unreachable at end
     483             :           // of concurrent marking cycle.
     484           2 :           if (marking_state_.IsWhite(value)) {
     485             :             weak_objects_->discovered_ephemerons.Push(task_id_,
     486           0 :                                                       Ephemeron{key, value});
     487             :           }
     488             :         }
     489             :       }
     490             :     }
     491             : 
     492       31597 :     return table->SizeFromMap(map);
     493             :   }
     494             : 
     495             :   // Implements ephemeron semantics: Marks value if key is already reachable.
     496             :   // Returns true if value was actually marked.
     497         119 :   bool VisitEphemeron(HeapObject key, HeapObject value) {
     498         119 :     if (marking_state_.IsBlackOrGrey(key)) {
     499          52 :       if (marking_state_.WhiteToGrey(value)) {
     500          52 :         shared_.Push(value);
     501          52 :         return true;
     502             :       }
     503             : 
     504          67 :     } else if (marking_state_.IsWhite(value)) {
     505          67 :       weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
     506             :     }
     507             : 
     508             :     return false;
     509             :   }
     510             : 
     511  3112981707 :   void MarkObject(HeapObject object) {
     512             : #ifdef THREAD_SANITIZER
     513             :     // Perform a dummy acquire load to tell TSAN that there is no data race
     514             :     // in mark-bit initialization. See MemoryChunk::Initialize for the
     515             :     // corresponding release store.
     516             :     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
     517             :     CHECK_NOT_NULL(chunk->synchronized_heap());
     518             : #endif
     519  3124652076 :     if (marking_state_.WhiteToGrey(object)) {
     520   338679539 :       shared_.Push(object);
     521             :     }
     522  3124274871 :   }
     523             : 
     524    29248736 :   void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
     525             :     marking_state_.WhiteToGrey(descriptors);
     526    58261694 :     if (marking_state_.GreyToBlack(descriptors)) {
     527             :       VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
     528    14163344 :                     descriptors->GetDescriptorSlot(0));
     529             :     }
     530    29085438 :   }
     531             : 
     532             :  private:
     533             :   // Helper class for collecting in-object slot addresses and values.
     534           0 :   class SlotSnapshottingVisitor final : public ObjectVisitor {
     535             :    public:
     536             :     explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
     537    59931701 :         : slot_snapshot_(slot_snapshot) {
     538             :       slot_snapshot_->clear();
     539             :     }
     540             : 
     541   120025578 :     void VisitPointers(HeapObject host, ObjectSlot start,
     542             :                        ObjectSlot end) override {
     543   616082946 :       for (ObjectSlot p = start; p < end; ++p) {
     544             :         Object object = p.Relaxed_Load();
     545   376031790 :         slot_snapshot_->add(p, object);
     546             :       }
     547   120025578 :     }
     548             : 
     549           0 :     void VisitPointers(HeapObject host, MaybeObjectSlot start,
     550             :                        MaybeObjectSlot end) override {
     551             :       // This should never happen, because we don't use snapshotting for objects
     552             :       // which contain weak references.
     553           0 :       UNREACHABLE();
     554             :     }
     555             : 
     556           0 :     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
     557             :       // This should never happen, because snapshotting is performed only on
     558             :       // JSObjects (and derived classes).
     559           0 :       UNREACHABLE();
     560             :     }
     561             : 
     562           0 :     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
     563             :       // This should never happen, because snapshotting is performed only on
     564             :       // JSObjects (and derived classes).
     565           0 :       UNREACHABLE();
     566             :     }
     567             : 
     568         273 :     void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
     569             :                                  ObjectSlot end) override {
     570             :       DCHECK(host->IsJSWeakCell() || host->IsJSWeakRef());
     571         273 :     }
     572             : 
     573             :    private:
     574             :     SlotSnapshot* slot_snapshot_;
     575             :   };
     576             : 
     577             :   template <typename T>
     578             :   int VisitJSObjectSubclassFast(Map map, T object) {
     579             :     DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
     580             :     using TBodyDescriptor = typename T::FastBodyDescriptor;
     581    55648867 :     return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
     582             :   }
     583             : 
     584             :   template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
     585    56195191 :   int VisitJSObjectSubclass(Map map, T object) {
     586             :     int size = TBodyDescriptor::SizeOf(map, object);
     587    56195191 :     int used_size = map->UsedInstanceSize();
     588             :     DCHECK_LE(used_size, size);
     589             :     DCHECK_GE(used_size, T::kHeaderSize);
     590             :     return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
     591    56209593 :                                                           used_size, size);
     592             :   }
     593             : 
     594             :   template <typename T>
     595      410970 :   int VisitEmbedderTracingSubclass(Map map, T object) {
     596             :     DCHECK(object->IsApiWrapper());
     597      410970 :     int size = VisitJSObjectSubclass(map, object);
     598      410920 :     if (size && embedder_tracing_enabled_) {
     599             :       // Success: The object needs to be processed for embedder references on
     600             :       // the main thread.
     601           0 :       embedder_objects_.Push(object);
     602             :     }
     603      410920 :     return size;
     604             :   }
     605             : 
     606             :   template <typename T>
     607    11629081 :   int VisitLeftTrimmableArray(Map map, T object) {
     608             :     // The synchronized_length() function checks that the length is a Smi.
     609             :     // This is not necessarily the case if the array is being left-trimmed.
     610    11629081 :     Object length = object->unchecked_synchronized_length();
     611    11629081 :     if (!ShouldVisit(object)) return 0;
     612             :     // The cached length must be the actual length as the array is not black.
     613             :     // Left trimming marks the array black before over-writing the length.
     614             :     DCHECK(length->IsSmi());
     615    11642529 :     int size = T::SizeFor(Smi::ToInt(length));
     616             :     VisitMapPointer(object, object->map_slot());
     617             :     T::BodyDescriptor::IterateBody(map, object, size, this);
     618             :     return size;
     619             :   }
     620             : 
     621             :   template <typename T>
     622             :   int VisitFullyWithSnapshot(Map map, T object) {
     623             :     using TBodyDescriptor = typename T::BodyDescriptor;
     624             :     int size = TBodyDescriptor::SizeOf(map, object);
     625             :     return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
     626     3732043 :                                                           size);
     627             :   }
     628             : 
     629             :   template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
     630    59941571 :   int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
     631             :     const SlotSnapshot& snapshot =
     632    59941571 :         MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
     633    60101527 :     if (!ShouldVisit(object)) return 0;
     634    59965311 :     VisitPointersInSnapshot(object, snapshot);
     635      547921 :     return size;
     636             :   }
     637             : 
     638             :   template <typename T, typename TBodyDescriptor>
     639    59931701 :   const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
     640    59931701 :     SlotSnapshottingVisitor visitor(&slot_snapshot_);
     641             :     visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
     642      354855 :     TBodyDescriptor::IterateBody(map, object, size, &visitor);
     643    60104929 :     return slot_snapshot_;
     644             :   }
     645             : 
     646     4184834 :   void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
     647             :     MarkCompactCollector::RecordRelocSlotInfo info =
     648     4184834 :         MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
     649     4184592 :     if (info.should_record) {
     650      123312 :       MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
     651      123371 :       if (!data.typed_slots) {
     652         824 :         data.typed_slots.reset(new TypedSlots());
     653             :       }
     654      246742 :       data.typed_slots->Insert(info.slot_type, info.offset);
     655             :     }
     656     4184639 :   }
     657             : 
     658             :   ConcurrentMarking::MarkingWorklist::View shared_;
     659             :   WeakObjects* weak_objects_;
     660             :   ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
     661             :   ConcurrentMarkingState marking_state_;
     662             :   MemoryChunkDataMap* memory_chunk_data_;
     663             :   int task_id_;
     664             :   SlotSnapshot slot_snapshot_;
     665             :   bool embedder_tracing_enabled_;
     666             :   const unsigned mark_compact_epoch_;
     667             :   bool is_forced_gc_;
     668             : };
     669             : 
     670             : // Strings can change maps due to conversion to thin string or external strings.
     671             : // Use unchecked cast to avoid data race in slow dchecks.
     672             : template <>
     673     3637458 : ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     674     3637458 :   return ConsString::unchecked_cast(object);
     675             : }
     676             : 
     677             : template <>
     678       57073 : SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     679       57073 :   return SlicedString::unchecked_cast(object);
     680             : }
     681             : 
     682             : template <>
     683       37698 : ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     684       37698 :   return ThinString::unchecked_cast(object);
     685             : }
     686             : 
     687             : template <>
     688    47412454 : SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     689    47412454 :   return SeqOneByteString::unchecked_cast(object);
     690             : }
     691             : 
     692             : template <>
     693    42401009 : SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     694    42401009 :   return SeqTwoByteString::unchecked_cast(object);
     695             : }
     696             : 
     697             : // Fixed array can become a free space during left trimming.
     698             : template <>
     699    11562276 : FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
     700    11562276 :   return FixedArray::unchecked_cast(object);
     701             : }
     702             : 
     703             : class ConcurrentMarking::Task : public CancelableTask {
     704             :  public:
     705             :   Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
     706             :        TaskState* task_state, int task_id)
     707             :       : CancelableTask(isolate),
     708             :         concurrent_marking_(concurrent_marking),
     709             :         task_state_(task_state),
     710      596036 :         task_id_(task_id) {}
     711             : 
     712     1191447 :   ~Task() override = default;
     713             : 
     714             :  private:
     715             :   // v8::internal::CancelableTask overrides.
     716      502897 :   void RunInternal() override {
     717      502897 :     concurrent_marking_->Run(task_id_, task_state_);
     718      503196 :   }
     719             : 
     720             :   ConcurrentMarking* concurrent_marking_;
     721             :   TaskState* task_state_;
     722             :   int task_id_;
     723             :   DISALLOW_COPY_AND_ASSIGN(Task);
     724             : };
     725             : 
     726       62896 : ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
     727             :                                      MarkingWorklist* on_hold,
     728             :                                      WeakObjects* weak_objects,
     729             :                                      EmbedderTracingWorklist* embedder_objects)
     730             :     : heap_(heap),
     731             :       shared_(shared),
     732             :       on_hold_(on_hold),
     733             :       weak_objects_(weak_objects),
     734      628976 :       embedder_objects_(embedder_objects) {
     735             : // The runtime flag should be set only if the compile time flag was set.
     736             : #ifndef V8_CONCURRENT_MARKING
     737             :   CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
     738             : #endif
     739       62898 : }
     740             : 
     741      502753 : void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     742  1022293149 :   TRACE_BACKGROUND_GC(heap_->tracer(),
     743             :                       GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
     744             :   size_t kBytesUntilInterruptCheck = 64 * KB;
     745             :   int kObjectsUntilInterrupCheck = 1000;
     746             :   ConcurrentMarkingVisitor visitor(
     747             :       shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
     748     1005594 :       task_id, heap_->local_embedder_heap_tracer()->InUse(),
     749     1005594 :       task_state->mark_compact_epoch, task_state->is_forced_gc);
     750             :   double time_ms;
     751             :   size_t marked_bytes = 0;
     752      502797 :   if (FLAG_trace_concurrent_marking) {
     753             :     heap_->isolate()->PrintWithTimestamp(
     754           0 :         "Starting concurrent marking task %d\n", task_id);
     755             :   }
     756             :   bool ephemeron_marked = false;
     757             : 
     758             :   {
     759             :     TimedScope scope(&time_ms);
     760             : 
     761             :     {
     762      502555 :       Ephemeron ephemeron;
     763             : 
     764     1005229 :       while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
     765         119 :         if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
     766             :           ephemeron_marked = true;
     767             :         }
     768             :       }
     769             :     }
     770             : 
     771             :     bool done = false;
     772     1889753 :     while (!done) {
     773             :       size_t current_marked_bytes = 0;
     774             :       int objects_processed = 0;
     775  1020198560 :       while (current_marked_bytes < kBytesUntilInterruptCheck &&
     776   510099280 :              objects_processed < kObjectsUntilInterrupCheck) {
     777   509717137 :         HeapObject object;
     778   509717137 :         if (!shared_->Pop(task_id, &object)) {
     779             :           done = true;
     780      502637 :           break;
     781             :         }
     782   509889633 :         objects_processed++;
     783             :         // The order of the two loads is important.
     784   509889633 :         Address new_space_top = heap_->new_space()->original_top_acquire();
     785   509889633 :         Address new_space_limit = heap_->new_space()->original_limit_relaxed();
     786             :         Address addr = object->address();
     787   509889633 :         if (new_space_top <= addr && addr < new_space_limit) {
     788     8769968 :           on_hold_->Push(task_id, object);
     789             :         } else {
     790             :           Map map = object->synchronized_map();
     791   500226999 :           current_marked_bytes += visitor.Visit(map, object);
     792             :         }
     793             :       }
     794      884780 :       marked_bytes += current_marked_bytes;
     795             :       base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
     796      884780 :                                                 marked_bytes);
     797      884780 :       if (task_state->preemption_request) {
     798         346 :         TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
     799             :                      "ConcurrentMarking::Run Preempted");
     800         173 :         break;
     801             :       }
     802             :     }
     803             : 
     804      358390 :     if (done) {
     805      502570 :       Ephemeron ephemeron;
     806             : 
     807     1005140 :       while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
     808           0 :         if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
     809             :           ephemeron_marked = true;
     810             :         }
     811             :       }
     812             :     }
     813             : 
     814      358410 :     shared_->FlushToGlobal(task_id);
     815      502397 :     on_hold_->FlushToGlobal(task_id);
     816      502363 :     embedder_objects_->FlushToGlobal(task_id);
     817             : 
     818      502287 :     weak_objects_->transition_arrays.FlushToGlobal(task_id);
     819      502305 :     weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
     820      502286 :     weak_objects_->current_ephemerons.FlushToGlobal(task_id);
     821      502023 :     weak_objects_->next_ephemerons.FlushToGlobal(task_id);
     822      501971 :     weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
     823      501764 :     weak_objects_->weak_references.FlushToGlobal(task_id);
     824      502006 :     weak_objects_->js_weak_refs.FlushToGlobal(task_id);
     825      502035 :     weak_objects_->js_weak_cells.FlushToGlobal(task_id);
     826      501873 :     weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
     827      501868 :     weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
     828      501866 :     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     829             :     total_marked_bytes_ += marked_bytes;
     830             : 
     831      501866 :     if (ephemeron_marked) {
     832             :       set_ephemeron_marked(true);
     833             :     }
     834             : 
     835             :     {
     836      501866 :       base::MutexGuard guard(&pending_lock_);
     837      503297 :       is_pending_[task_id] = false;
     838      503297 :       --pending_task_count_;
     839      503297 :       pending_condition_.NotifyAll();
     840             :     }
     841             :   }
     842      502739 :   if (FLAG_trace_concurrent_marking) {
     843             :     heap_->isolate()->PrintWithTimestamp(
     844             :         "Task %d concurrently marked %dKB in %.2fms\n", task_id,
     845           0 :         static_cast<int>(marked_bytes / KB), time_ms);
     846      502444 :   }
     847      503228 : }
     848             : 
     849       85148 : void ConcurrentMarking::ScheduleTasks() {
     850             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     851             :   DCHECK(!heap_->IsTearingDown());
     852       85148 :   base::MutexGuard guard(&pending_lock_);
     853             :   DCHECK_EQ(0, pending_task_count_);
     854       85148 :   if (task_count_ == 0) {
     855             :     static const int num_cores =
     856       16372 :         V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
     857             : #if defined(V8_OS_MACOSX)
     858             :     // Mac OSX 10.11 and prior seems to have trouble when doing concurrent
     859             :     // marking on competing hyper-threads (regresses Octane/Splay). As such,
     860             :     // only use num_cores/2, leaving one of those for the main thread.
     861             :     // TODO(ulan): Use all cores on Mac 10.12+.
     862             :     task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
     863             : #else   // defined(OS_MACOSX)
     864             :     // On other platforms use all logical cores, leaving one for the main
     865             :     // thread.
     866       32744 :     task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
     867             : #endif  // defined(OS_MACOSX)
     868             :   }
     869             :   // Task id 0 is for the main thread.
     870      681184 :   for (int i = 1; i <= task_count_; i++) {
     871      596036 :     if (!is_pending_[i]) {
     872      596036 :       if (FLAG_trace_concurrent_marking) {
     873             :         heap_->isolate()->PrintWithTimestamp(
     874     1192072 :             "Scheduling concurrent marking task %d\n", i);
     875             :       }
     876      596036 :       task_state_[i].preemption_request = false;
     877             :       task_state_[i].mark_compact_epoch =
     878     1192072 :           heap_->mark_compact_collector()->epoch();
     879      596036 :       task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
     880      596036 :       is_pending_[i] = true;
     881      596036 :       ++pending_task_count_;
     882             :       auto task =
     883     1192072 :           base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
     884     1192072 :       cancelable_id_[i] = task->id();
     885     1788108 :       V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
     886             :     }
     887             :   }
     888             :   DCHECK_EQ(task_count_, pending_task_count_);
     889       85148 : }
     890             : 
     891     1548040 : void ConcurrentMarking::RescheduleTasksIfNeeded() {
     892             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     893     1548040 :   if (heap_->IsTearingDown()) return;
     894             :   {
     895     1548040 :     base::MutexGuard guard(&pending_lock_);
     896     1548040 :     if (pending_task_count_ > 0) return;
     897             :   }
     898     4428740 :   if (!shared_->IsGlobalPoolEmpty() ||
     899     4370976 :       !weak_objects_->current_ephemerons.IsEmpty() ||
     900     1437723 :       !weak_objects_->discovered_ephemerons.IsEmpty()) {
     901       57783 :     ScheduleTasks();
     902             :   }
     903             : }
     904             : 
     905      271049 : bool ConcurrentMarking::Stop(StopRequest stop_request) {
     906             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     907      271049 :   base::MutexGuard guard(&pending_lock_);
     908             : 
     909      271049 :   if (pending_task_count_ == 0) return false;
     910             : 
     911       33662 :   if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
     912             :     CancelableTaskManager* task_manager =
     913       67292 :         heap_->isolate()->cancelable_task_manager();
     914      269168 :     for (int i = 1; i <= task_count_; i++) {
     915      235522 :       if (is_pending_[i]) {
     916      182730 :         if (task_manager->TryAbort(cancelable_id_[i]) ==
     917             :             TryAbortResult::kTaskAborted) {
     918       92636 :           is_pending_[i] = false;
     919       92636 :           --pending_task_count_;
     920       90094 :         } else if (stop_request == StopRequest::PREEMPT_TASKS) {
     921         204 :           task_state_[i].preemption_request = true;
     922             :         }
     923             :       }
     924             :     }
     925             :   }
     926       93191 :   while (pending_task_count_ > 0) {
     927       59529 :     pending_condition_.Wait(&pending_lock_);
     928             :   }
     929             :   for (int i = 1; i <= task_count_; i++) {
     930             :     DCHECK(!is_pending_[i]);
     931             :   }
     932             :   return true;
     933             : }
     934             : 
     935           0 : bool ConcurrentMarking::IsStopped() {
     936           0 :   if (!FLAG_concurrent_marking) return true;
     937             : 
     938           0 :   base::MutexGuard guard(&pending_lock_);
     939           0 :   return pending_task_count_ == 0;
     940             : }
     941             : 
     942      248111 : void ConcurrentMarking::FlushMemoryChunkData(
     943             :     MajorNonAtomicMarkingState* marking_state) {
     944             :   DCHECK_EQ(pending_task_count_, 0);
     945     1984678 :   for (int i = 1; i <= task_count_; i++) {
     946     1736567 :     MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
     947     4328950 :     for (auto& pair : memory_chunk_data) {
     948             :       // ClearLiveness sets the live bytes to zero.
     949             :       // Pages with zero live bytes might be already unmapped.
     950      855816 :       MemoryChunk* memory_chunk = pair.first;
     951             :       MemoryChunkData& data = pair.second;
     952      855816 :       if (data.live_bytes) {
     953             :         marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
     954             :       }
     955      855816 :       if (data.typed_slots) {
     956             :         RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
     957        1646 :                                               std::move(data.typed_slots));
     958             :       }
     959             :     }
     960             :     memory_chunk_data.clear();
     961     1736567 :     task_state_[i].marked_bytes = 0;
     962             :   }
     963             :   total_marked_bytes_ = 0;
     964      248111 : }
     965             : 
     966      512685 : void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
     967     2322129 :   for (int i = 1; i <= task_count_; i++) {
     968     1809444 :     auto it = task_state_[i].memory_chunk_data.find(chunk);
     969     1809444 :     if (it != task_state_[i].memory_chunk_data.end()) {
     970        1766 :       it->second.live_bytes = 0;
     971             :       it->second.typed_slots.reset();
     972             :     }
     973             :   }
     974      512685 : }
     975             : 
     976       62773 : size_t ConcurrentMarking::TotalMarkedBytes() {
     977             :   size_t result = 0;
     978      502179 :   for (int i = 1; i <= task_count_; i++) {
     979             :     result +=
     980      878812 :         base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
     981             :   }
     982       62773 :   result += total_marked_bytes_;
     983       62773 :   return result;
     984             : }
     985             : 
     986       23594 : ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     987             :     : concurrent_marking_(concurrent_marking),
     988       46502 :       resume_on_exit_(FLAG_concurrent_marking &&
     989             :                       concurrent_marking_->Stop(
     990       70096 :                           ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
     991             :   DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
     992       23594 : }
     993             : 
     994       23594 : ConcurrentMarking::PauseScope::~PauseScope() {
     995       23594 :   if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
     996       23594 : }
     997             : 
     998             : }  // namespace internal
     999      183867 : }  // namespace v8

Generated by: LCOV version 1.10