LCOV - code coverage report
Current view: top level - src/heap - concurrent-marking.cc (source / functions)
Test: app.info
Date: 2019-04-17

                 Hit    Total    Coverage
Lines:           339    367      92.4 %
Functions:       93     108      86.1 %

          Line data    Source code
       1             : // Copyright 2017 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/concurrent-marking.h"
       6             : 
       7             : #include <stack>
       8             : #include <unordered_map>
       9             : 
      10             : #include "include/v8config.h"
      11             : #include "src/base/template-utils.h"
      12             : #include "src/heap/gc-tracer.h"
      13             : #include "src/heap/heap-inl.h"
      14             : #include "src/heap/heap.h"
      15             : #include "src/heap/mark-compact-inl.h"
      16             : #include "src/heap/mark-compact.h"
      17             : #include "src/heap/marking.h"
      18             : #include "src/heap/objects-visiting-inl.h"
      19             : #include "src/heap/objects-visiting.h"
      20             : #include "src/heap/worklist.h"
      21             : #include "src/isolate.h"
      22             : #include "src/objects/data-handler-inl.h"
      23             : #include "src/objects/embedder-data-array-inl.h"
      24             : #include "src/objects/hash-table-inl.h"
      25             : #include "src/objects/slots-inl.h"
      26             : #include "src/transitions-inl.h"
      27             : #include "src/utils-inl.h"
      28             : #include "src/utils.h"
      29             : #include "src/v8.h"
      30             : 
      31             : namespace v8 {
      32             : namespace internal {
      33             : 
      34             : class ConcurrentMarkingState final
      35             :     : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
      36             :  public:
      37             :   explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
      38      473654 :       : memory_chunk_data_(memory_chunk_data) {}
      39             : 
      40             :   ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
      41             :     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
      42             :                   reinterpret_cast<intptr_t>(chunk),
      43             :               MemoryChunk::kMarkBitmapOffset);
      44             :     return chunk->marking_bitmap<AccessMode::ATOMIC>();
      45             :   }
      46             : 
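                     :   // Live bytes are accumulated in a task-local map rather than on the
                     :   // page itself, so the hot marking path needs no atomic updates; the
                     :   // totals are merged on the main thread in FlushMemoryChunkData.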
      47   430130863 :   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
      48   855042838 :     (*memory_chunk_data_)[chunk].live_bytes += by;
      49   424911975 :   }
      50             : 
      51             :   // The live_bytes and SetLiveBytes methods of the marking state are
      52             :   // not used by the concurrent marker.
      53             : 
      54             :  private:
      55             :   MemoryChunkDataMap* memory_chunk_data_;
      56             : };
      57             : 
      58             : // Helper class for storing in-object slot addresses and values.
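                     : // A snapshot is taken while the main thread may still be mutating the
                     : // object, and marking then works off the snapshot; writes that race with
                     : // the snapshot are caught by the marking write barrier.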
      59             : class SlotSnapshot {
      60             :  public:
      61   118949204 :   SlotSnapshot() : number_of_slots_(0) {}
      62             :   int number_of_slots() const { return number_of_slots_; }
      63   339814015 :   ObjectSlot slot(int i) const { return snapshot_[i].first; }
      64   339814015 :   Object value(int i) const { return snapshot_[i].second; }
      65    56835072 :   void clear() { number_of_slots_ = 0; }
      66             :   void add(ObjectSlot slot, Object value) {
      67   338623878 :     snapshot_[number_of_slots_++] = {slot, value};
      68             :   }
      69             : 
      70             :  private:
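                     :   // Snapshots are taken only for JS objects and small string shapes, so
                     :   // the maximal JSObject instance size bounds the snapshot.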
      71             :   static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
      72             :   int number_of_slots_;
      73             :   std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
      74             :   DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
      75             : };
      76             : 
      77      947920 : class ConcurrentMarkingVisitor final
      78             :     : public HeapVisitor<int, ConcurrentMarkingVisitor> {
      79             :  public:
      80             :   using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
      81             : 
      82             :   explicit ConcurrentMarkingVisitor(
      83             :       ConcurrentMarking::MarkingWorklist* shared,
      84             :       MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
      85             :       ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
      86             :       bool embedder_tracing_enabled, unsigned mark_compact_epoch,
      87             :       bool is_forced_gc)
      88             :       : shared_(shared, task_id),
      89             :         weak_objects_(weak_objects),
      90             :         embedder_objects_(embedder_objects, task_id),
      91             :         marking_state_(memory_chunk_data),
      92             :         memory_chunk_data_(memory_chunk_data),
      93             :         task_id_(task_id),
      94             :         embedder_tracing_enabled_(embedder_tracing_enabled),
      95             :         mark_compact_epoch_(mark_compact_epoch),
      96     1894616 :         is_forced_gc_(is_forced_gc) {
       97             :     // It is not safe to access flags from the concurrent marking visitor,
       98             :     // so the bytecode flush mode is cached based on the flags here.
      99      473654 :     bytecode_flush_mode_ = Heap::GetBytecodeFlushMode();
     100             :   }
     101             : 
     102             :   template <typename T>
     103             :   static V8_INLINE T Cast(HeapObject object) {
     104             :     return T::cast(object);
     105             :   }
     106             : 
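                     :   // Only the task that wins the grey-to-black transition visits the
                     :   // object, so each object is visited (and its size accounted) exactly
                     :   // once even if several tasks pop it from the worklist.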
     107   407144242 :   bool ShouldVisit(HeapObject object) {
     108   818998913 :     return marking_state_.GreyToBlack(object);
     109             :   }
     110             : 
     111             :   bool AllowDefaultJSObjectVisit() { return false; }
     112             : 
     113             :   template <typename THeapObjectSlot>
     114  2071490893 :   void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
     115             :                                HeapObject heap_object) {
     116  2071490893 :     MarkObject(heap_object);
     117             :     MarkCompactCollector::RecordSlot(host, slot, heap_object);
     118  2075582048 :   }
     119             : 
     120             :   template <typename THeapObjectSlot>
     121    65233974 :   void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
     122             :                              HeapObject heap_object) {
     123             : #ifdef THREAD_SANITIZER
     124             :     // Perform a dummy acquire load to tell TSAN that there is no data race
     125             :     // in mark-bit initialization. See MemoryChunk::Initialize for the
     126             :     // corresponding release store.
     127             :     MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
     128             :     CHECK_NOT_NULL(chunk->synchronized_heap());
     129             : #endif
     130    65233974 :     if (marking_state_.IsBlackOrGrey(heap_object)) {
     131             :       // Weak references with live values are directly processed here to
     132             :       // reduce the processing time of weak cells during the main GC
     133             :       // pause.
     134             :       MarkCompactCollector::RecordSlot(host, slot, heap_object);
     135             :     } else {
      136             :       // If we do not know the liveness of the value, we have to process
     137             :       // the reference when we know the liveness of the whole transitive
     138             :       // closure.
     139    11174590 :       weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
     140             :     }
     141    65237905 :   }
     142             : 
     143   654803526 :   void VisitPointers(HeapObject host, ObjectSlot start,
     144             :                      ObjectSlot end) override {
     145             :     VisitPointersImpl(host, start, end);
     146   656314069 :   }
     147             : 
     148    47558526 :   void VisitPointers(HeapObject host, MaybeObjectSlot start,
     149             :                      MaybeObjectSlot end) override {
     150             :     VisitPointersImpl(host, start, end);
     151    47017162 :   }
     152             : 
     153             :   template <typename TSlot>
     154             :   V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
     155             :     using THeapObjectSlot = typename TSlot::THeapObjectSlot;
     156  4144099067 :     for (TSlot slot = start; slot < end; ++slot) {
     157  2783241757 :       typename TSlot::TObject object = slot.Relaxed_Load();
     158  2788346324 :       HeapObject heap_object;
     159  2788346324 :       if (object.GetHeapObjectIfStrong(&heap_object)) {
     160             :         // If the reference changes concurrently from strong to weak, the write
     161             :         // barrier will treat the weak reference as strong, so we won't miss the
     162             :         // weak reference.
     163  2100993059 :         ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
     164   219476498 :       } else if (TSlot::kCanBeWeak &&
     165             :                  object.GetHeapObjectIfWeak(&heap_object)) {
     166    65095387 :         ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
     167             :       }
     168             :     }
     169             :   }
     170             : 
     171             :   // Weak list pointers should be ignored during marking. The lists are
     172             :   // reconstructed after GC.
     173    37693764 :   void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
     174    37693764 :                                ObjectSlot end) final {}
     175             : 
     176     2125818 :   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
     177             :     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     178             :     HeapObject object = rinfo->target_object();
     179     2125818 :     RecordRelocSlot(host, rinfo, object);
     180     2123616 :     if (!marking_state_.IsBlackOrGrey(object)) {
     181      251209 :       if (host->IsWeakObject(object)) {
     182       45005 :         weak_objects_->weak_objects_in_code.Push(task_id_,
     183       45005 :                                                  std::make_pair(object, host));
     184             :       } else {
     185      206173 :         MarkObject(object);
     186             :       }
     187             :     }
     188     2123613 :   }
     189             : 
     190      429989 :   void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
     191             :     DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
     192      429989 :     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     193      429962 :     RecordRelocSlot(host, rinfo, target);
     194      429951 :     MarkObject(target);
     195      429943 :   }
     196             : 
     197    56368157 :   void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
     198   736073371 :     for (int i = 0; i < snapshot.number_of_slots(); i++) {
     199             :       ObjectSlot slot = snapshot.slot(i);
     200             :       Object object = snapshot.value(i);
     201             :       DCHECK(!HasWeakHeapObjectTag(object));
     202   339814015 :       if (!object->IsHeapObject()) continue;
     203             :       HeapObject heap_object = HeapObject::cast(object);
     204   333799212 :       MarkObject(heap_object);
     205             :       MarkCompactCollector::RecordSlot(host, slot, heap_object);
     206             :     }
     207    56406749 :   }
     208             : 
     209             :   // ===========================================================================
     210             :   // JS object =================================================================
     211             :   // ===========================================================================
     212             : 
     213       25782 :   int VisitJSObject(Map map, JSObject object) {
     214       25782 :     return VisitJSObjectSubclass(map, object);
     215             :   }
     216             : 
     217    11862751 :   int VisitJSObjectFast(Map map, JSObject object) {
     218    11874926 :     return VisitJSObjectSubclassFast(map, object);
     219             :   }
     220             : 
     221       10332 :   int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
     222       10332 :     return VisitJSObjectSubclass(map, object);
     223             :   }
     224             : 
     225          49 :   int VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
     226          49 :     int size = VisitJSObjectSubclass(map, weak_ref);
     227          49 :     if (size == 0) {
     228             :       return 0;
     229             :     }
     230          49 :     if (weak_ref->target()->IsHeapObject()) {
     231             :       HeapObject target = HeapObject::cast(weak_ref->target());
     232          49 :       if (marking_state_.IsBlackOrGrey(target)) {
     233             :         // Record the slot inside the JSWeakRef, since the
     234             :         // VisitJSObjectSubclass above didn't visit it.
     235             :         ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
     236             :         MarkCompactCollector::RecordSlot(weak_ref, slot, target);
     237             :       } else {
      238             :         // The JSWeakRef points to a potentially dead object. We have to
      239             :         // process it when we know the liveness of the whole transitive closure.
     240          33 :         weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
     241             :       }
     242             :     }
     243             :     return size;
     244             :   }
     245             : 
     246          20 :   int VisitWeakCell(Map map, WeakCell weak_cell) {
     247          20 :     if (!ShouldVisit(weak_cell)) return 0;
     248             : 
     249             :     int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
     250             :     VisitMapPointer(weak_cell, weak_cell->map_slot());
     251          20 :     WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
     252          20 :     if (weak_cell->target()->IsHeapObject()) {
     253             :       HeapObject target = HeapObject::cast(weak_cell->target());
     254          20 :       if (marking_state_.IsBlackOrGrey(target)) {
     255             :         // Record the slot inside the WeakCell, since the IterateBody above
     256             :         // didn't visit it.
     257             :         ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
     258             :         MarkCompactCollector::RecordSlot(weak_cell, slot, target);
     259             :       } else {
      260             :         // The WeakCell points to a potentially dead object. We have to
      261             :         // process it when we know the liveness of the whole transitive closure.
     262          12 :         weak_objects_->weak_cells.Push(task_id_, weak_cell);
     263             :       }
     264             :     }
     265             :     return size;
     266             :   }
     267             : 
     268             :   // Some JS objects can carry back links to embedders that contain information
     269             :   // relevant to the garbage collectors.
     270             : 
     271       71542 :   int VisitJSApiObject(Map map, JSObject object) {
     272       71542 :     return VisitEmbedderTracingSubclass(map, object);
     273             :   }
     274             : 
     275       46909 :   int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
     276       46909 :     return VisitEmbedderTracingSubclass(map, object);
     277             :   }
     278             : 
     279          52 :   int VisitJSDataView(Map map, JSDataView object) {
     280          52 :     return VisitEmbedderTracingSubclass(map, object);
     281             :   }
     282             : 
     283       55254 :   int VisitJSTypedArray(Map map, JSTypedArray object) {
     284       55254 :     return VisitEmbedderTracingSubclass(map, object);
     285             :   }
     286             : 
     287             :   // ===========================================================================
     288             :   // Strings with pointers =====================================================
     289             :   // ===========================================================================
     290             : 
     291     9163483 :   int VisitConsString(Map map, ConsString object) {
     292     9169230 :     return VisitFullyWithSnapshot(map, object);
     293             :   }
     294             : 
     295       58771 :   int VisitSlicedString(Map map, SlicedString object) {
     296       58808 :     return VisitFullyWithSnapshot(map, object);
     297             :   }
     298             : 
     299       59387 :   int VisitThinString(Map map, ThinString object) {
     300       59456 :     return VisitFullyWithSnapshot(map, object);
     301             :   }
     302             : 
     303             :   // ===========================================================================
     304             :   // Strings without pointers ==================================================
     305             :   // ===========================================================================
     306             : 
     307    38423866 :   int VisitSeqOneByteString(Map map, SeqOneByteString object) {
     308    38423866 :     if (!ShouldVisit(object)) return 0;
     309             :     VisitMapPointer(object, object->map_slot());
     310             :     return SeqOneByteString::SizeFor(object->synchronized_length());
     311             :   }
     312             : 
     313    37247794 :   int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
     314    37247794 :     if (!ShouldVisit(object)) return 0;
     315             :     VisitMapPointer(object, object->map_slot());
     316             :     return SeqTwoByteString::SizeFor(object->synchronized_length());
     317             :   }
     318             : 
     319             :   // ===========================================================================
     320             :   // Fixed array object ========================================================
     321             :   // ===========================================================================
     322             : 
     323       67073 :   int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
     324             :                                      MemoryChunk* chunk) {
     325             :     // The concurrent marker can process larger chunks than the main thread
     326             :     // marker.
     327             :     const int kProgressBarScanningChunk =
     328             :         RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
     329             :     DCHECK(marking_state_.IsBlackOrGrey(object));
     330       67073 :     marking_state_.GreyToBlack(object);
     331             :     int size = FixedArray::BodyDescriptor::SizeOf(map, object);
     332             :     size_t current_progress_bar = chunk->ProgressBar();
     333       67071 :     if (current_progress_bar == 0) {
     334             :       // Try to move the progress bar forward to start offset. This solves the
     335             :       // problem of not being able to observe a progress bar reset when
     336             :       // processing the first kProgressBarScanningChunk.
     337        7725 :       if (!chunk->TrySetProgressBar(0,
     338             :                                     FixedArray::BodyDescriptor::kStartOffset))
     339             :         return 0;
     340             :       current_progress_bar = FixedArray::BodyDescriptor::kStartOffset;
     341             :     }
     342       67072 :     int start = static_cast<int>(current_progress_bar);
     343       67072 :     int end = Min(size, start + kProgressBarScanningChunk);
     344       67072 :     if (start < end) {
     345       67025 :       VisitPointers(object, object.RawField(start), object.RawField(end));
     346             :       // Setting the progress bar can fail if the object that is currently
     347             :       // scanned is also revisited. In this case, there may be two tasks racing
      348             :       // on the progress counter. The loser can bail out because the progress
     349             :       // bar is reset before the tasks race on the object.
     350       67026 :       if (chunk->TrySetProgressBar(current_progress_bar, end) && (end < size)) {
     351             :         // The object can be pushed back onto the marking worklist only after
     352             :         // progress bar was updated.
     353             :         shared_.Push(object);
     354             :       }
     355             :     }
     356       67073 :     return end - start;
     357             :   }
     358             : 
     359     7648269 :   int VisitFixedArray(Map map, FixedArray object) {
     360             :     // Arrays with the progress bar are not left-trimmable because they reside
     361             :     // in the large object space.
     362             :     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
     363             :     return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
     364             :                ? VisitFixedArrayWithProgressBar(map, object, chunk)
     365     7648269 :                : VisitLeftTrimmableArray(map, object);
     366             :   }
     367             : 
     368             :   int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
     369       77100 :     return VisitLeftTrimmableArray(map, object);
     370             :   }
     371             : 
     372             :   // ===========================================================================
     373             :   // Side-effectful visitation.
     374             :   // ===========================================================================
     375             : 
     376    37628333 :   int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
     377    37628333 :     if (!ShouldVisit(shared_info)) return 0;
     378             : 
     379             :     int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
     380             :     VisitMapPointer(shared_info, shared_info->map_slot());
     381             :     SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
     382    37792993 :                                                     this);
     383             : 
      384             :     // If the SharedFunctionInfo has old bytecode, mark it as flushable;
     385             :     // otherwise visit the function data field strongly.
     386    37033467 :     if (shared_info->ShouldFlushBytecode(bytecode_flush_mode_)) {
     387      204509 :       weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
     388             :     } else {
     389             :       VisitPointer(shared_info, shared_info->RawField(
     390    36934446 :                                     SharedFunctionInfo::kFunctionDataOffset));
     391             :     }
     392             :     return size;
     393             :   }
     394             : 
     395     1405037 :   int VisitBytecodeArray(Map map, BytecodeArray object) {
     396     1405037 :     if (!ShouldVisit(object)) return 0;
     397             :     int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
     398             :     VisitMapPointer(object, object->map_slot());
     399     1405585 :     BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
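                     :     // Aging makes the bytecode eligible for flushing later; forced GCs
                     :     // are skipped so that they do not artificially age (and flush)
                     :     // bytecode.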
     400     1409180 :     if (!is_forced_gc_) {
     401     1124202 :       object->MakeOlder();
     402             :     }
     403             :     return size;
     404             :   }
     405             : 
     406    35538773 :   int VisitJSFunction(Map map, JSFunction object) {
     407    35538773 :     int size = VisitJSObjectSubclass(map, object);
     408             : 
     409             :     // Check if the JSFunction needs reset due to bytecode being flushed.
     410    70775270 :     if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
     411    35359983 :         object->NeedsResetDueToFlushedBytecode()) {
     412        6387 :       weak_objects_->flushed_js_functions.Push(task_id_, object);
     413             :     }
     414             : 
     415    35415285 :     return size;
     416             :   }
     417             : 
     418    26994075 :   int VisitMap(Map meta_map, Map map) {
     419    26994075 :     if (!ShouldVisit(map)) return 0;
     420             :     int size = Map::BodyDescriptor::SizeOf(meta_map, map);
     421    27551683 :     if (map->CanTransition()) {
     422             :       // Maps that can transition share their descriptor arrays and require
     423             :       // special visiting logic to avoid memory leaks.
     424             :       // Since descriptor arrays are potentially shared, ensure that only the
     425             :       // descriptors that belong to this map are marked. The first time a
     426             :       // non-empty descriptor array is marked, its header is also visited. The
     427             :       // slot holding the descriptor array will be implicitly recorded when the
     428             :       // pointer fields of this map are visited.
     429             :       DescriptorArray descriptors = map->synchronized_instance_descriptors();
     430    27434085 :       MarkDescriptorArrayBlack(descriptors);
     431    27319447 :       int number_of_own_descriptors = map->NumberOfOwnDescriptors();
     432    27319447 :       if (number_of_own_descriptors) {
     433             :         // It is possible that the concurrent marker observes the
     434             :         // number_of_own_descriptors out of sync with the descriptors. In that
     435             :         // case the marking write barrier for the descriptor array will ensure
     436             :         // that all required descriptors are marked. The concurrent marker
     437             :         // just should avoid crashing in that case. That's why we need the
     438             :         // std::min<int>() below.
     439    21145062 :         VisitDescriptors(descriptors,
     440             :                          std::min<int>(number_of_own_descriptors,
     441    42290124 :                                        descriptors->number_of_descriptors()));
     442             :       }
     443             :       // Mark the pointer fields of the Map. Since the transitions array has
     444             :       // been marked already, it is fine that one of these fields contains a
     445             :       // pointer to it.
     446             :     }
     447    27357910 :     Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
     448    26738273 :     return size;
     449             :   }
     450             : 
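                     :   // Descriptor arrays are shared between maps, so each task atomically
                     :   // claims the range [old_marked, new_marked) of descriptors; the epoch
                     :   // check prevents reusing a count from a previous mark-compact cycle.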
     451    21224417 :   void VisitDescriptors(DescriptorArray descriptor_array,
     452             :                         int number_of_own_descriptors) {
     453    21224417 :     int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
     454    21224417 :     int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
     455    21224417 :         mark_compact_epoch_, new_marked);
     456    21244824 :     if (old_marked < new_marked) {
     457    12192048 :       VisitPointers(
     458             :           descriptor_array,
     459    12258350 :           MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
     460    12258350 :           MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
     461             :     }
     462    21178522 :   }
     463             : 
     464      168761 :   int VisitDescriptorArray(Map map, DescriptorArray array) {
     465      168761 :     if (!ShouldVisit(array)) return 0;
     466             :     VisitMapPointer(array, array->map_slot());
     467             :     int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
     468             :     VisitPointers(array, array->GetFirstPointerSlot(),
     469      100109 :                   array->GetDescriptorSlot(0));
     470      100067 :     VisitDescriptors(array, array->number_of_descriptors());
     471      100088 :     return size;
     472             :   }
     473             : 
     474      440311 :   int VisitTransitionArray(Map map, TransitionArray array) {
     475      440311 :     if (!ShouldVisit(array)) return 0;
     476             :     VisitMapPointer(array, array->map_slot());
     477             :     int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
     478             :     TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
     479      440196 :     weak_objects_->transition_arrays.Push(task_id_, array);
     480      440222 :     return size;
     481             :   }
     482             : 
     483       37977 :   int VisitJSWeakCollection(Map map, JSWeakCollection object) {
     484       37977 :     return VisitJSObjectSubclass(map, object);
     485             :   }
     486             : 
     487       37118 :   int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
     488       37118 :     if (!ShouldVisit(table)) return 0;
     489       37858 :     weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
     490             : 
     491      317827 :     for (int i = 0; i < table->Capacity(); i++) {
     492             :       ObjectSlot key_slot =
     493             :           table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
     494             :       HeapObject key = HeapObject::cast(table->KeyAt(i));
     495             :       MarkCompactCollector::RecordSlot(table, key_slot, key);
     496             : 
     497             :       ObjectSlot value_slot =
     498             :           table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
     499             : 
     500      141347 :       if (marking_state_.IsBlackOrGrey(key)) {
     501      141244 :         VisitPointer(table, value_slot);
     502             : 
     503             :       } else {
     504         103 :         Object value_obj = table->ValueAt(i);
     505             : 
     506         103 :         if (value_obj->IsHeapObject()) {
     507             :           HeapObject value = HeapObject::cast(value_obj);
     508             :           MarkCompactCollector::RecordSlot(table, value_slot, value);
     509             : 
     510             :           // Revisit ephemerons with both key and value unreachable at end
     511             :           // of concurrent marking cycle.
     512          42 :           if (marking_state_.IsWhite(value)) {
     513          28 :             weak_objects_->discovered_ephemerons.Push(task_id_,
     514          56 :                                                       Ephemeron{key, value});
     515             :           }
     516             :         }
     517             :       }
     518             :     }
     519             : 
     520       36306 :     return table->SizeFromMap(map);
     521             :   }
     522             : 
     523             :   // Implements ephemeron semantics: Marks value if key is already reachable.
     524             :   // Returns true if value was actually marked.
     525         170 :   bool ProcessEphemeron(HeapObject key, HeapObject value) {
     526         170 :     if (marking_state_.IsBlackOrGrey(key)) {
     527          40 :       if (marking_state_.WhiteToGrey(value)) {
     528             :         shared_.Push(value);
     529          40 :         return true;
     530             :       }
     531             : 
     532         130 :     } else if (marking_state_.IsWhite(value)) {
     533         130 :       weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
     534             :     }
     535             : 
     536             :     return false;
     537             :   }
     538             : 
     539  2345071183 :   void MarkObject(HeapObject object) {
     540             : #ifdef THREAD_SANITIZER
     541             :     // Perform a dummy acquire load to tell TSAN that there is no data race
     542             :     // in mark-bit initialization. See MemoryChunk::Initialize for the
     543             :     // corresponding release store.
     544             :     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
     545             :     CHECK_NOT_NULL(chunk->synchronized_heap());
     546             : #endif
     547  2356815916 :     if (marking_state_.WhiteToGrey(object)) {
     548             :       shared_.Push(object);
     549             :     }
     550  2355515051 :   }
     551             : 
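                     :   // Marks the descriptor array itself black and, for the task that wins
                     :   // the grey-to-black transition, visits its header slots; the
                     :   // descriptors themselves are marked incrementally via VisitDescriptors.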
     552    27379290 :   void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
     553             :     marking_state_.WhiteToGrey(descriptors);
     554    54900433 :     if (marking_state_.GreyToBlack(descriptors)) {
     555             :       VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
     556    12123061 :                     descriptors->GetDescriptorSlot(0));
     557             :     }
     558    27445074 :   }
     559             : 
     560             :  private:
     561             :   // Helper class for collecting in-object slot addresses and values.
     562    57349724 :   class SlotSnapshottingVisitor final : public ObjectVisitor {
     563             :    public:
     564             :     explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
     565    56835072 :         : slot_snapshot_(slot_snapshot) {
     566             :       slot_snapshot_->clear();
     567             :     }
     568             : 
     569   112880275 :     void VisitPointers(HeapObject host, ObjectSlot start,
     570             :                        ObjectSlot end) override {
     571   564384428 :       for (ObjectSlot p = start; p < end; ++p) {
     572             :         Object object = p.Relaxed_Load();
     573   338623878 :         slot_snapshot_->add(p, object);
     574             :       }
     575   112880275 :     }
     576             : 
     577           0 :     void VisitPointers(HeapObject host, MaybeObjectSlot start,
     578             :                        MaybeObjectSlot end) override {
     579             :       // This should never happen, because we don't use snapshotting for objects
     580             :       // which contain weak references.
     581           0 :       UNREACHABLE();
     582             :     }
     583             : 
     584           0 :     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
     585             :       // This should never happen, because snapshotting is performed only on
     586             :       // JSObjects (and derived classes).
     587           0 :       UNREACHABLE();
     588             :     }
     589             : 
     590           0 :     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
     591             :       // This should never happen, because snapshotting is performed only on
     592             :       // JSObjects (and derived classes).
     593           0 :       UNREACHABLE();
     594             :     }
     595             : 
     596          49 :     void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
     597             :                                  ObjectSlot end) override {
     598             :       DCHECK(host->IsWeakCell() || host->IsJSWeakRef());
     599          49 :     }
     600             : 
     601             :    private:
     602             :     SlotSnapshot* slot_snapshot_;
     603             :   };
     604             : 
     605             :   template <typename T>
     606             :   int VisitJSObjectSubclassFast(Map map, T object) {
     607             :     DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
     608             :     using TBodyDescriptor = typename T::FastBodyDescriptor;
     609    11862751 :     return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
     610             :   }
     611             : 
     612             :   template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
     613    47633178 :   int VisitJSObjectSubclass(Map map, T object) {
     614             :     int size = TBodyDescriptor::SizeOf(map, object);
     615    47633178 :     int used_size = map->UsedInstanceSize();
     616             :     DCHECK_LE(used_size, size);
     617             :     DCHECK_GE(used_size, T::kHeaderSize);
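                     :     // Only the used part of the object is visited: fields beyond
                     :     // used_size are unused in-object slack that the main thread may trim
                     :     // concurrently.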
     618             :     return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
     619    47583093 :                                                           used_size, size);
     620             :   }
     621             : 
     622             :   template <typename T>
     623      173722 :   int VisitEmbedderTracingSubclass(Map map, T object) {
     624             :     DCHECK(object->IsApiWrapper());
     625      173722 :     int size = VisitJSObjectSubclass(map, object);
     626      174082 :     if (size && embedder_tracing_enabled_) {
     627             :       // Success: The object needs to be processed for embedder references on
     628             :       // the main thread.
     629             :       embedder_objects_.Push(object);
     630             :     }
     631      174082 :     return size;
     632             :   }
     633             : 
     634             :   template <typename T>
     635     7658457 :   int VisitLeftTrimmableArray(Map map, T object) {
     636             :     // The synchronized_length() function checks that the length is a Smi.
     637             :     // This is not necessarily the case if the array is being left-trimmed.
     638             :     Object length = object->unchecked_synchronized_length();
     639     7658457 :     if (!ShouldVisit(object)) return 0;
     640             :     // The cached length must be the actual length as the array is not black.
     641             :     // Left trimming marks the array black before over-writing the length.
     642             :     DCHECK(length->IsSmi());
     643             :     int size = T::SizeFor(Smi::ToInt(length));
     644             :     VisitMapPointer(object, object->map_slot());
     645             :     T::BodyDescriptor::IterateBody(map, object, size, this);
     646     7538043 :     return size;
     647             :   }
     648             : 
     649             :   template <typename T>
     650             :   int VisitFullyWithSnapshot(Map map, T object) {
     651             :     using TBodyDescriptor = typename T::BodyDescriptor;
     652             :     int size = TBodyDescriptor::SizeOf(map, object);
     653             :     return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
     654     9281641 :                                                           size);
     655             :   }
     656             : 
     657             :   template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
     658    56858851 :   int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
     659             :     const SlotSnapshot& snapshot =
     660    56858851 :         MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
     661    57348393 :     if (!ShouldVisit(object)) return 0;
     662    57149148 :     VisitPointersInSnapshot(object, snapshot);
     663    35592106 :     return size;
     664             :   }
     665             : 
     666             :   template <typename T, typename TBodyDescriptor>
     667    56835072 :   const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
     668    56835072 :     SlotSnapshottingVisitor visitor(&slot_snapshot_);
     669             :     visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
     670      112551 :     TBodyDescriptor::IterateBody(map, object, size, &visitor);
     671    57349724 :     return slot_snapshot_;
     672             :   }
     673             : 
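                     :   // Typed slots are buffered in the task-local MemoryChunkData and merged
                     :   // into the remembered set on the main thread (see FlushMemoryChunkData),
                     :   // avoiding synchronization on the remembered set itself.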
     674     2555493 :   void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
     675             :     MarkCompactCollector::RecordRelocSlotInfo info =
     676     2555493 :         MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
     677     2553615 :     if (info.should_record) {
     678      104684 :       MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
     679      104684 :       if (!data.typed_slots) {
     680         501 :         data.typed_slots.reset(new TypedSlots());
     681             :       }
     682      209368 :       data.typed_slots->Insert(info.slot_type, info.offset);
     683             :     }
     684     2553617 :   }
     685             : 
     686             :   ConcurrentMarking::MarkingWorklist::View shared_;
     687             :   WeakObjects* weak_objects_;
     688             :   ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
     689             :   ConcurrentMarkingState marking_state_;
     690             :   MemoryChunkDataMap* memory_chunk_data_;
     691             :   int task_id_;
     692             :   SlotSnapshot slot_snapshot_;
     693             :   bool embedder_tracing_enabled_;
     694             :   const unsigned mark_compact_epoch_;
     695             :   bool is_forced_gc_;
     696             :   BytecodeFlushMode bytecode_flush_mode_;
     697             : };
     698             : 
      699             : // Strings can change maps due to conversion to thin or external strings.
      700             : // Use unchecked casts to avoid data races in slow DCHECKs.
     701             : template <>
     702           0 : ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     703           0 :   return ConsString::unchecked_cast(object);
     704             : }
     705             : 
     706             : template <>
     707           0 : SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     708           0 :   return SlicedString::unchecked_cast(object);
     709             : }
     710             : 
     711             : template <>
     712           0 : ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     713           0 :   return ThinString::unchecked_cast(object);
     714             : }
     715             : 
     716             : template <>
     717           0 : SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     718           0 :   return SeqOneByteString::unchecked_cast(object);
     719             : }
     720             : 
     721             : template <>
     722           0 : SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
     723           0 :   return SeqTwoByteString::unchecked_cast(object);
     724             : }
     725             : 
      726             : // A FixedArray can become free space during left trimming.
     727             : template <>
     728           0 : FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
     729           0 :   return FixedArray::unchecked_cast(object);
     730             : }
     731             : 
     732             : class ConcurrentMarking::Task : public CancelableTask {
     733             :  public:
     734             :   Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
     735             :        TaskState* task_state, int task_id)
     736             :       : CancelableTask(isolate),
     737             :         concurrent_marking_(concurrent_marking),
     738             :         task_state_(task_state),
     739      572894 :         task_id_(task_id) {}
     740             : 
     741     1144767 :   ~Task() override = default;
     742             : 
     743             :  private:
     744             :   // v8::internal::CancelableTask overrides.
     745      473540 :   void RunInternal() override {
     746      473540 :     concurrent_marking_->Run(task_id_, task_state_);
     747      474252 :   }
     748             : 
     749             :   ConcurrentMarking* concurrent_marking_;
     750             :   TaskState* task_state_;
     751             :   int task_id_;
     752             :   DISALLOW_COPY_AND_ASSIGN(Task);
     753             : };
     754             : 
     755       62437 : ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
     756             :                                      MarkingWorklist* on_hold,
     757             :                                      WeakObjects* weak_objects,
     758             :                                      EmbedderTracingWorklist* embedder_objects)
     759             :     : heap_(heap),
     760             :       shared_(shared),
     761             :       on_hold_(on_hold),
     762             :       weak_objects_(weak_objects),
     763      624370 :       embedder_objects_(embedder_objects) {
     764             : // The runtime flag should be set only if the compile time flag was set.
     765             : #ifndef V8_CONCURRENT_MARKING
     766             :   CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
     767             : #endif
     768       62437 : }
     769             : 
     770      473217 : void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     771     2367665 :   TRACE_BACKGROUND_GC(heap_->tracer(),
     772             :                       GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
     773             :   size_t kBytesUntilInterruptCheck = 64 * KB;
      774             :   int kObjectsUntilInterruptCheck = 1000;
     775             :   ConcurrentMarkingVisitor visitor(
     776      947308 :       shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
     777      473654 :       task_id, heap_->local_embedder_heap_tracer()->InUse(),
     778      947308 :       task_state->mark_compact_epoch, task_state->is_forced_gc);
     779             :   double time_ms;
     780             :   size_t marked_bytes = 0;
     781      473654 :   if (FLAG_trace_concurrent_marking) {
     782             :     heap_->isolate()->PrintWithTimestamp(
     783           0 :         "Starting concurrent marking task %d\n", task_id);
     784             :   }
     785             :   bool ephemeron_marked = false;
     786             : 
     787             :   {
     788             :     TimedScope scope(&time_ms);
     789             : 
     790             :     {
     791      473195 :       Ephemeron ephemeron;
     792             : 
     793      473337 :       while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
     794         142 :         if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
     795             :           ephemeron_marked = true;
     796             :         }
     797             :       }
     798             :     }
     799             : 
     800             :     bool done = false;
     801     1641414 :     while (!done) {
     802             :       size_t current_marked_bytes = 0;
     803             :       int objects_processed = 0;
     804  1243528237 :       while (current_marked_bytes < kBytesUntilInterruptCheck &&
      805   414831634 :              objects_processed < kObjectsUntilInterruptCheck) {
     806   414136743 :         HeapObject object;
     807   414136743 :         if (!shared_->Pop(task_id, &object)) {
     808             :           done = true;
     809      473376 :           break;
     810             :         }
     811   413872006 :         objects_processed++;
     812             :         // The order of the two loads is important.
     813   413872006 :         Address new_space_top = heap_->new_space()->original_top_acquire();
     814   413872006 :         Address new_space_limit = heap_->new_space()->original_limit_relaxed();
     815   413872006 :         Address new_large_object = heap_->new_lo_space()->pending_object();
     816             :         Address addr = object->address();
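                     :         // Objects in the currently allocated region of new space (or a
                     :         // pending large object) may not be fully initialized yet, so they
                     :         // are deferred to the main thread via the on-hold worklist.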
     817   413872006 :         if ((new_space_top <= addr && addr < new_space_limit) ||
     818             :             addr == new_large_object) {
     819           0 :           on_hold_->Push(task_id, object);
     820             :         } else {
     821             :           Map map = object->synchronized_map();
     822   406696220 :           current_marked_bytes += visitor.Visit(map, object);
     823             :         }
     824             :       }
     825     1168267 :       marked_bytes += current_marked_bytes;
     826     1168267 :       base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
     827             :                                                 marked_bytes);
     828     1168267 :       if (task_state->preemption_request) {
     829         951 :         TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
     830             :                      "ConcurrentMarking::Run Preempted");
     831             :         break;
     832             :       }
     833             :     }
     834             : 
     835      675066 :     if (done) {
     836      473174 :       Ephemeron ephemeron;
     837             : 
     838      473202 :       while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
     839          28 :         if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
     840             :           ephemeron_marked = true;
     841             :         }
     842             :       }
     843             :     }
     844             : 
     845      675333 :     shared_->FlushToGlobal(task_id);
     846      473552 :     on_hold_->FlushToGlobal(task_id);
     847      473568 :     embedder_objects_->FlushToGlobal(task_id);
     848             : 
     849      473567 :     weak_objects_->transition_arrays.FlushToGlobal(task_id);
     850      473605 :     weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
     851      473598 :     weak_objects_->current_ephemerons.FlushToGlobal(task_id);
     852      473407 :     weak_objects_->next_ephemerons.FlushToGlobal(task_id);
     853      473393 :     weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
     854      473167 :     weak_objects_->weak_references.FlushToGlobal(task_id);
     855      473426 :     weak_objects_->js_weak_refs.FlushToGlobal(task_id);
     856      473533 :     weak_objects_->weak_cells.FlushToGlobal(task_id);
     857      473442 :     weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
     858      473485 :     weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
     859      473498 :     weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
     860      473430 :     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     861             :     total_marked_bytes_ += marked_bytes;
     862             : 
     863      473430 :     if (ephemeron_marked) {
     864             :       set_ephemeron_marked(true);
     865             :     }
     866             : 
     867             :     {
     868      473430 :       base::MutexGuard guard(&pending_lock_);
     869      474409 :       is_pending_[task_id] = false;
     870      474409 :       --pending_task_count_;
     871      474409 :       pending_condition_.NotifyAll();
     872             :     }
     873             :   }
     874      473960 :   if (FLAG_trace_concurrent_marking) {
     875           0 :     heap_->isolate()->PrintWithTimestamp(
     876             :         "Task %d concurrently marked %dKB in %.2fms\n", task_id,
     877           0 :         static_cast<int>(marked_bytes / KB), time_ms);
     878             :   }
     879      474356 : }
     880             : 
     881       81842 : void ConcurrentMarking::ScheduleTasks() {
     882             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     883             :   DCHECK(!heap_->IsTearingDown());
     884       81842 :   base::MutexGuard guard(&pending_lock_);
     885             :   DCHECK_EQ(0, pending_task_count_);
     886       81842 :   if (task_count_ == 0) {
     887             :     static const int num_cores =
     888       16761 :         V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
     889             : #if defined(V8_OS_MACOSX)
      890             :     // Mac OS X 10.11 and prior seem to have trouble when doing concurrent
     891             :     // marking on competing hyper-threads (regresses Octane/Splay). As such,
     892             :     // only use num_cores/2, leaving one of those for the main thread.
     893             :     // TODO(ulan): Use all cores on Mac 10.12+.
     894             :     task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
      895             : #else   // defined(V8_OS_MACOSX)
     896             :     // On other platforms use all logical cores, leaving one for the main
     897             :     // thread.
     898       33522 :     task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
      899             : #endif  // defined(V8_OS_MACOSX)
     900             :   }
     901             :   // Task id 0 is for the main thread.
     902      654736 :   for (int i = 1; i <= task_count_; i++) {
     903      572894 :     if (!is_pending_[i]) {
     904      572894 :       if (FLAG_trace_concurrent_marking) {
     905           0 :         heap_->isolate()->PrintWithTimestamp(
     906           0 :             "Scheduling concurrent marking task %d\n", i);
     907             :       }
     908      572894 :       task_state_[i].preemption_request = false;
     909      572894 :       task_state_[i].mark_compact_epoch =
     910     1145788 :           heap_->mark_compact_collector()->epoch();
     911      572894 :       task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
     912      572894 :       is_pending_[i] = true;
     913      572894 :       ++pending_task_count_;
     914             :       auto task =
     915     1145788 :           base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
     916     1145788 :       cancelable_id_[i] = task->id();
     917     1718682 :       V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
     918             :     }
     919             :   }
     920             :   DCHECK_EQ(task_count_, pending_task_count_);
     921       81842 : }
     922             : 
     923     1703986 : void ConcurrentMarking::RescheduleTasksIfNeeded() {
     924             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     925     1703986 :   if (heap_->IsTearingDown()) return;
     926             :   {
     927     1703986 :     base::MutexGuard guard(&pending_lock_);
     928     1703986 :     if (pending_task_count_ > 0) return;
     929             :   }
     930     4773374 :   if (!shared_->IsGlobalPoolEmpty() ||
     931     4718854 :       !weak_objects_->current_ephemerons.IsEmpty() ||
     932     1554764 :       !weak_objects_->discovered_ephemerons.IsEmpty()) {
     933       54539 :     ScheduleTasks();
     934             :   }
     935             : }
     936             : 
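                     : // Returns false if no tasks were pending. Unstarted tasks are aborted;
                     : // for PREEMPT_TASKS, running tasks are asked to bail out early via the
                     : // preemption_request flag, otherwise they are waited on to completion.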
     937      231209 : bool ConcurrentMarking::Stop(StopRequest stop_request) {
     938             :   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
     939      231209 :   base::MutexGuard guard(&pending_lock_);
     940             : 
     941      231209 :   if (pending_task_count_ == 0) return false;
     942             : 
     943       34837 :   if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
     944             :     CancelableTaskManager* task_manager =
     945       34820 :         heap_->isolate()->cancelable_task_manager();
     946      522300 :     for (int i = 1; i <= task_count_; i++) {
     947      243740 :       if (is_pending_[i]) {
     948      178691 :         if (task_manager->TryAbort(cancelable_id_[i]) ==
     949             :             TryAbortResult::kTaskAborted) {
     950       84410 :           is_pending_[i] = false;
     951       84410 :           --pending_task_count_;
     952       94281 :         } else if (stop_request == StopRequest::PREEMPT_TASKS) {
     953         445 :           task_state_[i].preemption_request = true;
     954             :         }
     955             :       }
     956             :     }
     957             :   }
     958       65427 :   while (pending_task_count_ > 0) {
     959       65427 :     pending_condition_.Wait(&pending_lock_);
     960             :   }
     961             :   for (int i = 1; i <= task_count_; i++) {
     962             :     DCHECK(!is_pending_[i]);
     963             :   }
     964             :   return true;
     965             : }
     966             : 
     967           0 : bool ConcurrentMarking::IsStopped() {
     968           0 :   if (!FLAG_concurrent_marking) return true;
     969             : 
     970           0 :   base::MutexGuard guard(&pending_lock_);
     971           0 :   return pending_task_count_ == 0;
     972             : }
     973             : 
     974      204233 : void ConcurrentMarking::FlushMemoryChunkData(
     975             :     MajorNonAtomicMarkingState* marking_state) {
     976             :   DCHECK_EQ(pending_task_count_, 0);
     977     3062885 :   for (int i = 1; i <= task_count_; i++) {
     978     1429327 :     MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
     979     2386548 :     for (auto& pair : memory_chunk_data) {
     980             :       // ClearLiveness sets the live bytes to zero.
      981             :       // Pages with zero live bytes might already be unmapped.
     982      957221 :       MemoryChunk* memory_chunk = pair.first;
     983             :       MemoryChunkData& data = pair.second;
     984      957221 :       if (data.live_bytes) {
     985             :         marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
     986             :       }
     987      957221 :       if (data.typed_slots) {
     988         497 :         RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
     989         497 :                                               std::move(data.typed_slots));
     990             :       }
     991             :     }
     992             :     memory_chunk_data.clear();
     993     1429326 :     task_state_[i].marked_bytes = 0;
     994             :   }
     995             :   total_marked_bytes_ = 0;
     996      204232 : }
     997             : 
     998     1153401 : void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
     999     9653257 :   for (int i = 1; i <= task_count_; i++) {
    1000             :     auto it = task_state_[i].memory_chunk_data.find(chunk);
    1001     4249928 :     if (it != task_state_[i].memory_chunk_data.end()) {
    1002        9225 :       it->second.live_bytes = 0;
    1003             :       it->second.typed_slots.reset();
    1004             :     }
    1005             :   }
    1006     1153401 : }
    1007             : 
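                     : // The result is approximate: per-task counters are read with relaxed
                     : // atomics while tasks may still be running and updating them.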
    1008     1083002 : size_t ConcurrentMarking::TotalMarkedBytes() {
    1009             :   size_t result = 0;
    1010    16245030 :   for (int i = 1; i <= task_count_; i++) {
    1011             :     result +=
    1012    15162028 :         base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
    1013             :   }
    1014     1083002 :   result += total_marked_bytes_;
    1015     1083002 :   return result;
    1016             : }
    1017             : 
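                     : // Preempts concurrent marking tasks for the lifetime of the scope and
                     : // reschedules them on exit if any were running.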
    1018       27814 : ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    1019             :     : concurrent_marking_(concurrent_marking),
    1020       54761 :       resume_on_exit_(FLAG_concurrent_marking &&
    1021       26947 :                       concurrent_marking_->Stop(
    1022       55628 :                           ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
    1023             :   DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
    1024       27814 : }
    1025             : 
    1026       55628 : ConcurrentMarking::PauseScope::~PauseScope() {
    1027       27814 :   if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
    1028       27814 : }
    1029             : 
    1030             : }  // namespace internal
    1031      121996 : }  // namespace v8

Generated by: LCOV version 1.10