LCOV - code coverage report
Current view: top level - src/heap - concurrent-marking.cc (source / functions) Hit Total Coverage
Test: app.info Lines: 237 241 98.3 %
Date: 2017-10-20 Functions: 47 52 90.4 %

          Line data    Source code
       1             : // Copyright 2017 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/concurrent-marking.h"
       6             : 
       7             : #include <stack>
       8             : #include <unordered_map>
       9             : 
      10             : #include "src/heap/heap-inl.h"
      11             : #include "src/heap/heap.h"
      12             : #include "src/heap/mark-compact-inl.h"
      13             : #include "src/heap/mark-compact.h"
      14             : #include "src/heap/marking.h"
      15             : #include "src/heap/objects-visiting-inl.h"
      16             : #include "src/heap/objects-visiting.h"
      17             : #include "src/heap/worklist.h"
      18             : #include "src/isolate.h"
      19             : #include "src/utils-inl.h"
      20             : #include "src/utils.h"
      21             : #include "src/v8.h"
      22             : 
      23             : namespace v8 {
      24             : namespace internal {
      25             : 
      26             : class ConcurrentMarkingState final
      27             :     : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
      28             :  public:
      29             :   explicit ConcurrentMarkingState(LiveBytesMap* live_bytes)
      30      231387 :       : live_bytes_(live_bytes) {}
      31             : 
      32  2436158607 :   Bitmap* bitmap(const MemoryChunk* chunk) {
      33  2436158607 :     return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
      34             :   }
      35             : 
      36   350511024 :   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
      37   700346920 :     (*live_bytes_)[chunk] += by;
      38   349835896 :   }
      39             : 
      40             :   // The live_bytes and SetLiveBytes methods of the marking state are
      41             :   // not used by the concurrent marker.
      42             : 
      43             :  private:
      44             :   LiveBytesMap* live_bytes_;
      45             : };
      46             : 
      47             : // Helper class for storing in-object slot addresses and values.
      48             : class SlotSnapshot {
      49             :  public:
      50    58498377 :   SlotSnapshot() : number_of_slots_(0) {}
      51             :   int number_of_slots() const { return number_of_slots_; }
      52   105238291 :   Object** slot(int i) const { return snapshot_[i].first; }
      53   105238291 :   Object* value(int i) const { return snapshot_[i].second; }
      54    24255676 :   void clear() { number_of_slots_ = 0; }
      55             :   void add(Object** slot, Object* value) {
      56   106161690 :     snapshot_[number_of_slots_].first = slot;
      57   106161690 :     snapshot_[number_of_slots_].second = value;
      58   106161690 :     ++number_of_slots_;
      59             :   }
      60             : 
      61             :  private:
      62             :   static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kPointerSize;
      63             :   int number_of_slots_;
      64             :   std::pair<Object**, Object*> snapshot_[kMaxSnapshotSize];
      65             :   DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
      66             : };
      67             : 
      68           0 : class ConcurrentMarkingVisitor final
      69             :     : public HeapVisitor<int, ConcurrentMarkingVisitor> {
      70             :  public:
      71             :   using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
      72             : 
      73             :   explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
      74             :                                     ConcurrentMarking::MarkingWorklist* bailout,
      75             :                                     LiveBytesMap* live_bytes,
      76             :                                     WeakObjects* weak_objects, int task_id)
      77             :       : shared_(shared, task_id),
      78             :         bailout_(bailout, task_id),
      79             :         weak_objects_(weak_objects),
      80             :         marking_state_(live_bytes),
      81      694161 :         task_id_(task_id) {}
      82             : 
      83             :   template <typename T>
      84             :   static V8_INLINE T* Cast(HeapObject* object) {
      85     5649237 :     return T::cast(object);
      86             :   }
      87             : 
      88   346822446 :   bool ShouldVisit(HeapObject* object) {
      89   696753005 :     return marking_state_.GreyToBlack(object);
      90             :   }
      91             : 
      92   628869564 :   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
      93  2949460205 :     for (Object** slot = start; slot < end; slot++) {
      94             :       Object* object = base::AsAtomicPointer::Relaxed_Load(slot);
      95  2317469107 :       if (!object->IsHeapObject()) continue;
      96  1956045011 :       MarkObject(HeapObject::cast(object));
      97             :       MarkCompactCollector::RecordSlot(host, slot, object);
      98             :     }
      99   631991098 :   }
     100             : 
     101   153029536 :   void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
     102   258315198 :     for (int i = 0; i < snapshot.number_of_slots(); i++) {
     103             :       Object** slot = snapshot.slot(i);
     104             :       Object* object = snapshot.value(i);
     105   105238291 :       if (!object->IsHeapObject()) continue;
     106   101221561 :       MarkObject(HeapObject::cast(object));
     107             :       MarkCompactCollector::RecordSlot(host, slot, object);
     108             :     }
     109    23919308 :   }
     110             : 
     111             :   // ===========================================================================
     112             :   // JS object =================================================================
     113             :   // ===========================================================================
     114             : 
     115     9999832 :   int VisitJSObject(Map* map, JSObject* object) {
     116             :     int size = JSObject::BodyDescriptor::SizeOf(map, object);
     117     9999832 :     const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
     118    10015735 :     if (!ShouldVisit(object)) return 0;
     119     9972185 :     VisitPointersInSnapshot(object, snapshot);
     120     9951074 :     return size;
     121             :   }
     122             : 
     123     9951243 :   int VisitJSObjectFast(Map* map, JSObject* object) {
     124     9951243 :     return VisitJSObject(map, object);
     125             :   }
     126             : 
     127       49282 :   int VisitJSApiObject(Map* map, JSObject* object) {
     128       49282 :     if (marking_state_.IsGrey(object)) {
     129             :       // The main thread will do wrapper tracing in Blink.
     130       49256 :       bailout_.Push(object);
     131             :     }
     132       49282 :     return 0;
     133             :   }
     134             : 
     135             :   // ===========================================================================
     136             :   // Strings with pointers =====================================================
     137             :   // ===========================================================================
     138             : 
     139    12651758 :   int VisitConsString(Map* map, ConsString* object) {
     140             :     int size = ConsString::BodyDescriptor::SizeOf(map, object);
     141    12651758 :     const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
     142    12684675 :     if (!ShouldVisit(object)) return 0;
     143    12708992 :     VisitPointersInSnapshot(object, snapshot);
     144             :     return size;
     145             :   }
     146             : 
     147       69308 :   int VisitSlicedString(Map* map, SlicedString* object) {
     148             :     int size = SlicedString::BodyDescriptor::SizeOf(map, object);
     149       69308 :     const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
     150       70352 :     if (!ShouldVisit(object)) return 0;
     151       70991 :     VisitPointersInSnapshot(object, snapshot);
     152             :     return size;
     153             :   }
     154             : 
     155     1532068 :   int VisitThinString(Map* map, ThinString* object) {
     156             :     int size = ThinString::BodyDescriptor::SizeOf(map, object);
     157     1532068 :     const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
     158     1534184 :     if (!ShouldVisit(object)) return 0;
     159     1244104 :     VisitPointersInSnapshot(object, snapshot);
     160             :     return size;
     161             :   }
     162             : 
     163             :   // ===========================================================================
     164             :   // Strings without pointers ==================================================
     165             :   // ===========================================================================
     166             : 
     167    74600164 :   int VisitSeqOneByteString(Map* map, SeqOneByteString* object) {
     168             :     int size = SeqOneByteString::SizeFor(object->synchronized_length());
     169    74600164 :     if (!ShouldVisit(object)) return 0;
     170    75000660 :     VisitMapPointer(object, object->map_slot());
     171             :     return size;
     172             :   }
     173             : 
     174     8284164 :   int VisitSeqTwoByteString(Map* map, SeqTwoByteString* object) {
     175             :     int size = SeqTwoByteString::SizeFor(object->synchronized_length());
     176     8284164 :     if (!ShouldVisit(object)) return 0;
     177     8382504 :     VisitMapPointer(object, object->map_slot());
     178             :     return size;
     179             :   }
     180             : 
     181             :   // ===========================================================================
     182             :   // Fixed array object ========================================================
     183             :   // ===========================================================================
     184             : 
     185    17089561 :   int VisitFixedArray(Map* map, FixedArray* object) {
     186             :     int length = object->synchronized_length();
     187             :     int size = FixedArray::SizeFor(length);
     188    17089561 :     if (!ShouldVisit(object)) return 0;
     189    17111561 :     VisitMapPointer(object, object->map_slot());
     190             :     FixedArray::BodyDescriptor::IterateBody(object, size, this);
     191             :     return size;
     192             :   }
     193             : 
     194             :   // ===========================================================================
     195             :   // Code object ===============================================================
     196             :   // ===========================================================================
     197             : 
     198    50186671 :   int VisitCode(Map* map, Code* object) {
     199    50186671 :     bailout_.Push(object);
     200    50160503 :     return 0;
     201             :   }
     202             : 
     203             :   // ===========================================================================
     204             :   // Objects with weak fields and/or side-effectiful visitation.
     205             :   // ===========================================================================
     206             : 
     207     1142062 :   int VisitBytecodeArray(Map* map, BytecodeArray* object) {
     208     1142062 :     if (!ShouldVisit(object)) return 0;
     209             :     int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
     210     1142449 :     VisitMapPointer(object, object->map_slot());
     211     1142184 :     BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
     212     1142429 :     object->MakeOlder();
     213             :     return size;
     214             :   }
     215             : 
     216      302157 :   int VisitAllocationSite(Map* map, AllocationSite* object) {
     217      302157 :     if (!ShouldVisit(object)) return 0;
     218             :     int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
     219      302357 :     VisitMapPointer(object, object->map_slot());
     220             :     AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
     221             :     return size;
     222             :   }
     223             : 
     224    23339605 :   int VisitJSFunction(Map* map, JSFunction* object) {
     225    23339605 :     if (!ShouldVisit(object)) return 0;
     226             :     int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
     227    23397617 :     VisitMapPointer(object, object->map_slot());
     228             :     JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
     229    23332791 :     return size;
     230             :   }
     231             : 
     232    28136389 :   int VisitMap(Map* meta_map, Map* map) {
     233    28086643 :     if (marking_state_.IsGrey(map)) {
     234             :       // Maps have ad-hoc weakness for descriptor arrays. They also clear the
     235             :       // code-cache. Conservatively visit strong fields skipping the
     236             :       // descriptor array field and the code cache field.
     237    28090277 :       VisitMapPointer(map, map->map_slot());
     238    28156212 :       VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
     239             :       VisitPointer(
     240    28182869 :           map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
     241             :       VisitPointer(map, HeapObject::RawField(
     242    28214639 :                             map, Map::kTransitionsOrPrototypeInfoOffset));
     243    28203389 :       VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
     244    28195408 :       VisitPointer(map, HeapObject::RawField(map, Map::kWeakCellCacheOffset));
     245    28235315 :       bailout_.Push(map);
     246             :     }
     247    28215561 :     return 0;
     248             :   }
     249             : 
     250       51326 :   int VisitNativeContext(Map* map, Context* object) {
     251       51326 :     if (!ShouldVisit(object)) return 0;
     252             :     int size = Context::BodyDescriptorWeak::SizeOf(map, object);
     253       51326 :     VisitMapPointer(object, object->map_slot());
     254             :     Context::BodyDescriptorWeak::IterateBody(object, size, this);
     255             :     return size;
     256             :   }
     257             : 
     258      394371 :   int VisitTransitionArray(Map* map, TransitionArray* array) {
     259      394371 :     if (!ShouldVisit(array)) return 0;
     260      394376 :     VisitMapPointer(array, array->map_slot());
     261             :     int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
     262             :     TransitionArray::BodyDescriptor::IterateBody(array, size, this);
     263      394374 :     weak_objects_->transition_arrays.Push(task_id_, array);
     264      394372 :     return size;
     265             :   }
     266             : 
     267    55768759 :   int VisitWeakCell(Map* map, WeakCell* object) {
     268    55768759 :     if (!ShouldVisit(object)) return 0;
     269    56093708 :     VisitMapPointer(object, object->map_slot());
     270    55936273 :     if (!object->cleared()) {
     271             :       HeapObject* value = HeapObject::cast(object->value());
     272    50961166 :       if (marking_state_.IsBlackOrGrey(value)) {
     273             :         // Weak cells with live values are directly processed here to reduce
     274             :         // the processing time of weak cells during the main GC pause.
     275    43516900 :         Object** slot = HeapObject::RawField(object, WeakCell::kValueOffset);
     276             :         MarkCompactCollector::RecordSlot(object, slot, value);
     277             :       } else {
     278             :         // If we do not know about liveness of values of weak cells, we have to
     279             :         // process them when we know the liveness of the whole transitive
     280             :         // closure.
     281     7444266 :         weak_objects_->weak_cells.Push(task_id_, object);
     282             :       }
     283             :     }
     284             :     return WeakCell::BodyDescriptor::SizeOf(map, object);
     285             :   }
     286             : 
     287        8779 :   int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
     288             :     // TODO(ulan): implement iteration of strong fields.
     289        8779 :     bailout_.Push(object);
     290        8772 :     return 0;
     291             :   }
     292             : 
     293  1973522191 :   void MarkObject(HeapObject* object) {
     294             : #ifdef THREAD_SANITIZER
     295             :     // Perform a dummy acquire load to tell TSAN that there is no data race
     296             :     // in mark-bit initialization. See MemoryChunk::Initialize for the
     297             :     // corresponding release store.
     298             :     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
     299             :     CHECK_NOT_NULL(chunk->synchronized_heap());
     300             : #endif
     301  2055908965 :     if (marking_state_.WhiteToGrey(object)) {
     302   237569310 :       shared_.Push(object);
     303             :     }
     304  2055721658 :   }
     305             : 
     306             :  private:
     307             :   // Helper class for collecting in-object slot addresses and values.
     308           0 :   class SlotSnapshottingVisitor final : public ObjectVisitor {
     309             :    public:
     310             :     explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
     311    24255676 :         : slot_snapshot_(slot_snapshot) {
     312             :       slot_snapshot_->clear();
     313             :     }
     314             : 
     315    48221738 :     void VisitPointers(HeapObject* host, Object** start,
     316             :                        Object** end) override {
     317   154383428 :       for (Object** p = start; p < end; p++) {
     318             :         Object* object = reinterpret_cast<Object*>(
     319   106161690 :             base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
     320   106161690 :         slot_snapshot_->add(p, object);
     321             :       }
     322    48221738 :     }
     323             : 
     324             :    private:
     325             :     SlotSnapshot* slot_snapshot_;
     326             :   };
     327             : 
     328             :   template <typename T>
     329    24255676 :   const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
     330             :     // TODO(ulan): Iterate only the existing fields and skip slack at the end
     331             :     // of the object.
     332    24255676 :     SlotSnapshottingVisitor visitor(&slot_snapshot_);
     333    24255676 :     visitor.VisitPointer(object,
     334             :                          reinterpret_cast<Object**>(object->map_slot()));
     335             :     T::BodyDescriptor::IterateBody(object, size, &visitor);
     336    24303506 :     return slot_snapshot_;
     337             :   }
     338             :   ConcurrentMarking::MarkingWorklist::View shared_;
     339             :   ConcurrentMarking::MarkingWorklist::View bailout_;
     340             :   WeakObjects* weak_objects_;
     341             :   ConcurrentMarkingState marking_state_;
     342             :   int task_id_;
     343             :   SlotSnapshot slot_snapshot_;
     344             : };
     345             : 
     346             : // Strings can change maps due to conversion to thin string or external strings.
     347             : // Use reinterpret cast to avoid data race in slow dchecks.
     348             : template <>
     349    12654885 : ConsString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
     350    12654885 :   return reinterpret_cast<ConsString*>(object);
     351             : }
     352             : 
     353             : template <>
     354       69318 : SlicedString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
     355       69318 :   return reinterpret_cast<SlicedString*>(object);
     356             : }
     357             : 
     358             : template <>
     359     1532113 : ThinString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
     360     1532113 :   return reinterpret_cast<ThinString*>(object);
     361             : }
     362             : 
     363             : template <>
     364    74608782 : SeqOneByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
     365    74608782 :   return reinterpret_cast<SeqOneByteString*>(object);
     366             : }
     367             : 
     368             : template <>
     369     8289267 : SeqTwoByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
     370     8289267 :   return reinterpret_cast<SeqTwoByteString*>(object);
     371             : }
     372             : 
     373             : class ConcurrentMarking::Task : public CancelableTask {
     374             :  public:
     375             :   Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
     376             :        TaskState* task_state, int task_id)
     377             :       : CancelableTask(isolate),
     378             :         concurrent_marking_(concurrent_marking),
     379             :         task_state_(task_state),
     380      241432 :         task_id_(task_id) {}
     381             : 
     382      482480 :   virtual ~Task() {}
     383             : 
     384             :  private:
     385             :   // v8::internal::CancelableTask overrides.
     386      231306 :   void RunInternal() override {
     387      231306 :     concurrent_marking_->Run(task_id_, task_state_);
     388      231438 :   }
     389             : 
     390             :   ConcurrentMarking* concurrent_marking_;
     391             :   TaskState* task_state_;
     392             :   int task_id_;
     393             :   DISALLOW_COPY_AND_ASSIGN(Task);
     394             : };
     395             : 
     396       55011 : ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
     397             :                                      MarkingWorklist* bailout,
     398             :                                      MarkingWorklist* on_hold,
     399             :                                      WeakObjects* weak_objects)
     400             :     : heap_(heap),
     401             :       shared_(shared),
     402             :       bailout_(bailout),
     403             :       on_hold_(on_hold),
     404             :       weak_objects_(weak_objects),
     405             :       pending_task_count_(0),
     406      110022 :       task_count_(0) {
     407             : // The runtime flag should be set only if the compile time flag was set.
     408             : #ifndef V8_CONCURRENT_MARKING
     409             :   CHECK(!FLAG_concurrent_marking);
     410             : #endif
     411      330066 :   for (int i = 0; i <= kMaxTasks; i++) {
     412      275055 :     is_pending_[i] = false;
     413             :   }
     414       55011 : }
     415             : 
     416      231020 : void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     417             :   size_t kBytesUntilInterruptCheck = 64 * KB;
     418             :   int kObjectsUntilInterrupCheck = 1000;
     419             :   LiveBytesMap* live_bytes = nullptr;
     420             :   {
     421      231020 :     base::LockGuard<base::Mutex> guard(&task_state->lock);
     422      231508 :     live_bytes = &task_state->live_bytes;
     423             :   }
     424             :   ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
     425      231387 :                                    task_id);
     426             :   double time_ms;
     427             :   size_t marked_bytes = 0;
     428      231387 :   if (FLAG_trace_concurrent_marking) {
     429             :     heap_->isolate()->PrintWithTimestamp(
     430   860823071 :         "Starting concurrent marking task %d\n", task_id);
     431             :   }
     432             :   {
     433             :     TimedScope scope(&time_ms);
     434             :     bool done = false;
     435     1116429 :     while (!done) {
     436             :       base::LockGuard<base::Mutex> guard(&task_state->lock);
     437             :       size_t current_marked_bytes = 0;
     438             :       int objects_processed = 0;
     439   860609231 :       while (current_marked_bytes < kBytesUntilInterruptCheck &&
     440   429977877 :              objects_processed < kObjectsUntilInterrupCheck) {
     441             :         HeapObject* object;
     442   429865085 :         if (!shared_->Pop(task_id, &object)) {
     443             :           done = true;
     444      231517 :           break;
     445             :         }
     446   430050196 :         objects_processed++;
     447   430050196 :         Address new_space_top = heap_->new_space()->original_top();
     448   430772875 :         Address new_space_limit = heap_->new_space()->original_limit();
     449   428194045 :         Address addr = object->address();
     450   428194045 :         if (new_space_top <= addr && addr < new_space_limit) {
     451     5177500 :           on_hold_->Push(task_id, object);
     452             :         } else {
     453             :           Map* map = object->synchronized_map();
     454   847180667 :           current_marked_bytes += visitor.Visit(map, object);
     455             :         }
     456             :       }
     457      344309 :       marked_bytes += current_marked_bytes;
     458             :       base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
     459      344309 :                                                 marked_bytes);
     460      653367 :       if (task_state->interrupt_request.Value()) {
     461         237 :         task_state->interrupt_condition.Wait(&task_state->lock);
     462             :       }
     463             :     }
     464             :     {
     465             :       // Take the lock to synchronize with worklist update after
     466             :       // young generation GC.
     467             :       base::LockGuard<base::Mutex> guard(&task_state->lock);
     468      231557 :       bailout_->FlushToGlobal(task_id);
     469      231543 :       on_hold_->FlushToGlobal(task_id);
     470             :     }
     471      231567 :     weak_objects_->weak_cells.FlushToGlobal(task_id);
     472      231554 :     weak_objects_->transition_arrays.FlushToGlobal(task_id);
     473      231537 :     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     474             :     total_marked_bytes_.Increment(marked_bytes);
     475             :     {
     476      231554 :       base::LockGuard<base::Mutex> guard(&pending_lock_);
     477      231580 :       is_pending_[task_id] = false;
     478      231580 :       --pending_task_count_;
     479      231580 :       pending_condition_.NotifyAll();
     480             :     }
     481             :   }
     482      231497 :   if (FLAG_trace_concurrent_marking) {
     483             :     heap_->isolate()->PrintWithTimestamp(
     484             :         "Task %d concurrently marked %dKB in %.2fms\n", task_id,
     485           0 :         static_cast<int>(marked_bytes / KB), time_ms);
     486             :   }
     487      231497 : }
     488             : 
     489       80508 : void ConcurrentMarking::ScheduleTasks() {
     490      161016 :   if (!FLAG_concurrent_marking) return;
     491       80508 :   base::LockGuard<base::Mutex> guard(&pending_lock_);
     492       80508 :   if (task_count_ == 0) {
     493             :     // TODO(ulan): Increase the number of tasks for platforms that benefit
     494             :     // from it.
     495             :     task_count_ = static_cast<int>(
     496       15901 :         V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() / 2);
     497       15901 :     task_count_ = Max(Min(task_count_, kMaxTasks), 1);
     498             :   }
     499             :   // Task id 0 is for the main thread.
     500      241432 :   for (int i = 1; i <= task_count_ && pending_task_count_ < task_count_; i++) {
     501      241432 :     if (!is_pending_[i]) {
     502      241432 :       if (FLAG_trace_concurrent_marking) {
     503             :         heap_->isolate()->PrintWithTimestamp(
     504           0 :             "Scheduling concurrent marking task %d\n", i);
     505             :       }
     506      241432 :       task_state_[i].interrupt_request.SetValue(false);
     507      241432 :       is_pending_[i] = true;
     508      241432 :       ++pending_task_count_;
     509      482864 :       Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
     510      241432 :       cancelable_id_[i] = task->id();
     511      241432 :       V8::GetCurrentPlatform()->CallOnBackgroundThread(
     512      241432 :           task, v8::Platform::kShortRunningTask);
     513             :     }
     514             :   }
     515             : }
     516             : 
     517      137857 : void ConcurrentMarking::RescheduleTasksIfNeeded() {
     518      137857 :   if (!FLAG_concurrent_marking) return;
     519             :   {
     520      137857 :     base::LockGuard<base::Mutex> guard(&pending_lock_);
     521      137857 :     if (pending_task_count_ > 0) return;
     522             :   }
     523      240054 :   if (!shared_->IsGlobalPoolEmpty()) {
     524       63819 :     ScheduleTasks();
     525             :   }
     526             : }
     527             : 
     528          24 : void ConcurrentMarking::WaitForTasks() {
     529          48 :   if (!FLAG_concurrent_marking) return;
     530          24 :   base::LockGuard<base::Mutex> guard(&pending_lock_);
     531          40 :   while (pending_task_count_ > 0) {
     532          16 :     pending_condition_.Wait(&pending_lock_);
     533             :   }
     534             : }
     535             : 
     536       57092 : void ConcurrentMarking::EnsureCompleted() {
     537      114184 :   if (!FLAG_concurrent_marking) return;
     538       57092 :   base::LockGuard<base::Mutex> guard(&pending_lock_);
     539             :   CancelableTaskManager* task_manager =
     540      114184 :       heap_->isolate()->cancelable_task_manager();
     541      228260 :   for (int i = 1; i <= task_count_; i++) {
     542      171168 :     if (is_pending_[i]) {
     543       25901 :       if (task_manager->TryAbort(cancelable_id_[i]) ==
     544             :           CancelableTaskManager::kTaskAborted) {
     545        9837 :         is_pending_[i] = false;
     546        9837 :         --pending_task_count_;
     547             :       }
     548             :     }
     549             :   }
     550       68801 :   while (pending_task_count_ > 0) {
     551       11709 :     pending_condition_.Wait(&pending_lock_);
     552             :   }
     553             :   for (int i = 1; i <= task_count_; i++) {
     554             :     DCHECK(!is_pending_[i]);
     555             :   }
     556             : }
     557             : 
     558       57068 : void ConcurrentMarking::FlushLiveBytes(
     559             :     MajorNonAtomicMarkingState* marking_state) {
     560             :   DCHECK_EQ(pending_task_count_, 0);
     561      228164 :   for (int i = 1; i <= task_count_; i++) {
     562      171096 :     LiveBytesMap& live_bytes = task_state_[i].live_bytes;
     563      601134 :     for (auto pair : live_bytes) {
     564             :       // ClearLiveness sets the live bytes to zero.
     565             :       // Pages with zero live bytes might be already unmapped.
     566      258942 :       if (pair.second != 0) {
     567             :         marking_state->IncrementLiveBytes(pair.first, pair.second);
     568             :       }
     569             :     }
     570             :     live_bytes.clear();
     571             :   }
     572             :   total_marked_bytes_.SetValue(0);
     573       57068 : }
     574             : 
     575      455432 : void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
     576     1104182 :   for (int i = 1; i <= task_count_; i++) {
     577     1297500 :     if (task_state_[i].live_bytes.count(chunk)) {
     578        6661 :       task_state_[i].live_bytes[chunk] = 0;
     579             :     }
     580             :   }
     581      455432 : }
     582             : 
     583           6 : size_t ConcurrentMarking::TotalMarkedBytes() {
     584             :   size_t result = 0;
     585          24 :   for (int i = 1; i <= task_count_; i++) {
     586             :     result +=
     587          36 :         base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
     588             :   }
     589           6 :   result += total_marked_bytes_.Value();
     590           6 :   return result;
     591             : }
     592             : 
     593       29652 : ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     594       29652 :     : concurrent_marking_(concurrent_marking) {
     595       59304 :   if (!FLAG_concurrent_marking) return;
     596             :   // Request task_state for all tasks.
     597      115672 :   for (int i = 1; i <= kMaxTasks; i++) {
     598      115672 :     concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
     599             :   }
     600             :   // Now take a lock to ensure that the tasks are waiting.
     601      115672 :   for (int i = 1; i <= kMaxTasks; i++) {
     602      115672 :     concurrent_marking_->task_state_[i].lock.Lock();
     603             :   }
     604             : }
     605             : 
     606       29652 : ConcurrentMarking::PauseScope::~PauseScope() {
     607       29652 :   if (!FLAG_concurrent_marking) return;
     608      115672 :   for (int i = kMaxTasks; i >= 1; i--) {
     609      115672 :     concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
     610      115672 :     concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
     611      115672 :     concurrent_marking_->task_state_[i].lock.Unlock();
     612             :   }
     613       29652 : }
     614             : 
     615             : }  // namespace internal
     616             : }  // namespace v8

Generated by: LCOV version 1.10