LCOV - code coverage report
Current view: src/heap/concurrent-marking.h
Test: app.info
Date: 2019-04-17
                Hit   Total   Coverage
Lines:            3       3    100.0 %
Functions:        1       1    100.0 %

          Line data    Source code
       1             : // Copyright 2017 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #ifndef V8_HEAP_CONCURRENT_MARKING_H_
       6             : #define V8_HEAP_CONCURRENT_MARKING_H_
       7             : 
       8             : #include "include/v8-platform.h"
       9             : #include "src/allocation.h"
      10             : #include "src/base/atomic-utils.h"
      11             : #include "src/base/platform/condition-variable.h"
      12             : #include "src/base/platform/mutex.h"
      13             : #include "src/cancelable-task.h"
      14             : #include "src/heap/slot-set.h"
      15             : #include "src/heap/spaces.h"
      16             : #include "src/heap/worklist.h"
      17             : #include "src/utils.h"
      18             : #include "src/v8.h"
      19             : 
      20             : namespace v8 {
      21             : namespace internal {
      22             : 
      23             : class Heap;
      24             : class Isolate;
      25             : class MajorNonAtomicMarkingState;
      26             : struct WeakObjects;
      27             : 
      28     1108461 : struct MemoryChunkData {
      29             :   intptr_t live_bytes;
      30             :   std::unique_ptr<TypedSlots> typed_slots;
      31             : };
      32             : 
      33             : using MemoryChunkDataMap =
      34             :     std::unordered_map<MemoryChunk*, MemoryChunkData, MemoryChunk::Hasher>;
      35             : 
      36      124850 : class V8_EXPORT_PRIVATE ConcurrentMarking {
      37             :  public:
      38             :   // When the scope is entered, the concurrent marking tasks
       39             :   // are preempted and are not looking at heap objects; concurrent marking
      40             :   // is resumed when the scope is exited.
      41             :   class PauseScope {
      42             :    public:
      43             :     explicit PauseScope(ConcurrentMarking* concurrent_marking);
      44             :     ~PauseScope();
      45             : 
      46             :    private:
      47             :     ConcurrentMarking* const concurrent_marking_;
      48             :     const bool resume_on_exit_;
      49             :   };
      50             : 
      51             :   enum class StopRequest {
      52             :     // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
      53             :     PREEMPT_TASKS,
       54             :     // Wait for ongoing tasks to complete (and cancel unstarted tasks).
      55             :     COMPLETE_ONGOING_TASKS,
      56             :     // Wait for all scheduled tasks to complete (only use this in tests that
      57             :     // control the full stack -- otherwise tasks cancelled by the platform can
      58             :     // make this call hang).
      59             :     COMPLETE_TASKS_FOR_TESTING,
      60             :   };
      61             : 
      62             :   // TODO(gab): The only thing that prevents this being above 7 is
      63             :   // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
      64             :   // task 0, reserved for the main thread).
      65             :   static constexpr int kMaxTasks = 7;
      66             :   using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
      67             :   using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;
      68             : 
      69             :   ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
      70             :                     MarkingWorklist* on_hold, WeakObjects* weak_objects,
      71             :                     EmbedderTracingWorklist* embedder_objects);
      72             : 
      73             :   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
      74             :   // heap should not be moved while these are active (can be stopped safely via
      75             :   // Stop() or PauseScope).
      76             :   void ScheduleTasks();
      77             : 
      78             :   // Stops concurrent marking per |stop_request|'s semantics. Returns true
      79             :   // if concurrent marking was in progress, false otherwise.
      80             :   bool Stop(StopRequest stop_request);
      81             : 
      82             :   void RescheduleTasksIfNeeded();
      83             :   // Flushes memory chunk data using the given marking state.
      84             :   void FlushMemoryChunkData(MajorNonAtomicMarkingState* marking_state);
      85             :   // This function is called for a new space page that was cleared after
      86             :   // scavenge and is going to be re-used.
      87             :   void ClearMemoryChunkData(MemoryChunk* chunk);
      88             : 
      89             :   int TaskCount() { return task_count_; }
      90             : 
      91             :   // Checks if all threads are stopped.
      92             :   bool IsStopped();
      93             : 
      94             :   size_t TotalMarkedBytes();
      95             : 
      96             :   void set_ephemeron_marked(bool ephemeron_marked) {
      97             :     ephemeron_marked_.store(ephemeron_marked);
      98             :   }
      99             :   bool ephemeron_marked() { return ephemeron_marked_.load(); }
     100             : 
     101             :  private:
     102      998921 :   struct TaskState {
     103             :     // The main thread sets this flag to true when it wants the concurrent
     104             :     // marker to give up the worker thread.
     105             :     std::atomic<bool> preemption_request;
     106             :     MemoryChunkDataMap memory_chunk_data;
     107             :     size_t marked_bytes = 0;
     108             :     unsigned mark_compact_epoch;
     109             :     bool is_forced_gc;
     110             :     char cache_line_padding[64];
     111             :   };
     112             :   class Task;
     113             :   void Run(int task_id, TaskState* task_state);
     114             :   Heap* const heap_;
     115             :   MarkingWorklist* const shared_;
     116             :   MarkingWorklist* const on_hold_;
     117             :   WeakObjects* const weak_objects_;
     118             :   EmbedderTracingWorklist* const embedder_objects_;
     119             :   TaskState task_state_[kMaxTasks + 1];
     120             :   std::atomic<size_t> total_marked_bytes_{0};
     121             :   std::atomic<bool> ephemeron_marked_{false};
     122             :   base::Mutex pending_lock_;
     123             :   base::ConditionVariable pending_condition_;
     124             :   int pending_task_count_ = 0;
     125             :   bool is_pending_[kMaxTasks + 1] = {};
     126             :   CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
     127             :   int task_count_ = 0;
     128             : };
     129             : 
     130             : }  // namespace internal
     131             : }  // namespace v8
     132             : 
     133             : #endif  // V8_HEAP_CONCURRENT_MARKING_H_
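
A minimal usage sketch of PauseScope as documented above. The surrounding function and the concurrent_marking() accessor on Heap are illustrative assumptions, not part of this header:

  // Hypothetical call site: pause concurrent marking while the main thread
  // moves heap objects, and let it resume when the scope unwinds.
  void RelocateObjectsSafely(Heap* heap) {
    ConcurrentMarking::PauseScope pause(heap->concurrent_marking());
    // Concurrent marking tasks are preempted here and are not looking at
    // heap objects, so it is safe to move them on the main thread.
  }  // ~PauseScope() resumes concurrent marking if it was active on entry.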
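
A hedged sketch of the Stop()/StopRequest semantics described in the enum comments; the surrounding function is hypothetical:

  // Hypothetical call site at the start of an atomic GC pause: preempt
  // running tasks as soon as possible and cancel unstarted ones.
  void EnterAtomicPause(ConcurrentMarking* concurrent_marking) {
    bool was_active = concurrent_marking->Stop(
        ConcurrentMarking::StopRequest::PREEMPT_TASKS);
    if (was_active) {
      // Concurrent marking was in progress and has given up its workers.
    }
    // A test that controls the full stack could instead pass
    // StopRequest::COMPLETE_TASKS_FOR_TESTING to wait for every scheduled
    // task to finish.
  }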
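
A sketch of the broader task lifecycle implied by ScheduleTasks(), Stop(), FlushMemoryChunkData(), and RescheduleTasksIfNeeded(). The call sites and the marking_state variable are assumptions for illustration:

  // Start background marking once incremental marking has begun.
  concurrent_marking->ScheduleTasks();

  // Later, at a safepoint where heap objects may be modified or moved:
  concurrent_marking->Stop(
      ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
  // Publish the per-chunk live bytes and typed slots gathered by the
  // worker tasks into the given marking state.
  concurrent_marking->FlushMemoryChunkData(marking_state);

  // Resume background marking if there is still work on the worklists.
  concurrent_marking->RescheduleTasksIfNeeded();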

Generated by: LCOV version 1.10