Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_INCREMENTAL_MARKING_H_
6 : #define V8_HEAP_INCREMENTAL_MARKING_H_
7 :
8 : #include "src/cancelable-task.h"
9 : #include "src/heap/heap.h"
10 : #include "src/heap/incremental-marking-job.h"
11 : #include "src/heap/mark-compact.h"
12 :
13 : namespace v8 {
14 : namespace internal {
15 :
16 : class HeapObject;
17 : class MarkBit;
18 : class Map;
19 : class Object;
20 : class PagedSpace;
21 :
// Identifies what triggered an incremental marking step: kV8 for steps
// driven from inside V8 itself (e.g. on allocation), kTask for steps run
// from a posted task — presumably via IncrementalMarkingJob
// (NOTE(review): confirm against Step() callers in the .cc file).
22 : enum class StepOrigin { kV8, kTask };
23 :
// Drives incremental marking for the major GC: marking work is performed in
// bounded steps (Step / AdvanceIncrementalMarking) interleaved with the
// mutator, triggered by allocation observers and posted tasks, with write
// barriers (RecordWrite*) keeping the marking worklist consistent while the
// mutator runs.
24 : class V8_EXPORT_PRIVATE IncrementalMarking {
25 : public:
// Phase of the incremental marking cycle. IsMarking() tests
// state() >= MARKING, so the declaration order (STOPPED < SWEEPING <
// MARKING < COMPLETE) is load-bearing. state_ must only be changed
// through SetState() (see below).
26 : enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
27 :
// Whether completing marking should request a full GC through the stack
// guard interrupt, or not.
28 : enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
29 :
30 : enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
31 :
// Outstanding request to the collector: none, complete marking entirely,
// or run the finalization pass (see NeedsFinalization /
// IsReadyToOverApproximateWeakClosure below).
32 : enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
33 :
// With concurrent marking the incremental marker uses the (atomic)
// IncrementalMarkingState; otherwise the non-atomic major state suffices.
34 : #ifdef V8_CONCURRENT_MARKING
35 : using MarkingState = IncrementalMarkingState;
36 : #else
37 : using MarkingState = MajorNonAtomicMarkingState;
38 : #endif // V8_CONCURRENT_MARKING
39 : using AtomicMarkingState = MajorAtomicMarkingState;
40 : using NonAtomicMarkingState = MajorNonAtomicMarkingState;
41 :
// RAII scope: if black allocation was active on entry, pauses it for the
// lifetime of the scope and restarts it on destruction; otherwise a no-op.
42 : class PauseBlackAllocationScope {
43 : public:
44 : explicit PauseBlackAllocationScope(IncrementalMarking* marking)
45 : : marking_(marking), paused_(false) {
46 23594 : if (marking_->black_allocation()) {
47 : paused_ = true;
48 636 : marking_->PauseBlackAllocation();
49 : }
50 : }
51 :
52 : ~PauseBlackAllocationScope() {
53 23594 : if (paused_) {
54 636 : marking_->StartBlackAllocation();
55 : }
56 : }
57 :
58 : private:
59 : IncrementalMarking* marking_;
60 : bool paused_;
61 : };
62 :
63 : // It's hard to know how much work the incremental marker should do to make
64 : // progress in the face of the mutator creating new work for it. We start
65 : // off at a moderate rate of work and gradually increase the speed of the
66 : // incremental marker until it completes.
67 : // Do some marking every time this much memory has been allocated or that many
68 : // heavy (color-checking) write barriers have been invoked.
69 : static const size_t kYoungGenerationAllocatedThreshold = 64 * KB;
70 : static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
71 : static const size_t kMinStepSizeInBytes = 64 * KB;
72 :
73 : static const int kStepSizeInMs = 1;
74 : static const int kMaxStepSizeInMs = 5;
75 :
// In DEBUG builds incremental marking can activate immediately
// (threshold 0) to maximize coverage of the incremental paths.
76 : #ifndef DEBUG
77 : static const intptr_t kActivationThreshold = 8 * MB;
78 : #else
79 : static const intptr_t kActivationThreshold = 0;
80 : #endif
81 :
// Access mode used for marking data: atomic when a concurrent marker may
// touch the same state (V8_CONCURRENT_MARKING), non-atomic otherwise.
82 : #ifdef V8_CONCURRENT_MARKING
83 : static const AccessMode kAtomicity = AccessMode::ATOMIC;
84 : #else
85 : static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
86 : #endif
87 :
88 : IncrementalMarking(Heap* heap,
89 : MarkCompactCollector::MarkingWorklist* marking_worklist,
90 : WeakObjects* weak_objects);
91 :
92 60936996 : MarkingState* marking_state() { return &marking_state_; }
93 :
94 : AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
95 :
96 : NonAtomicMarkingState* non_atomic_marking_state() {
97 : return &non_atomic_marking_state_;
98 : }
99 :
100 : void NotifyLeftTrimming(HeapObject from, HeapObject to);
101 :
102 : V8_INLINE void TransferColor(HeapObject from, HeapObject to);
103 :
// Any state other than STOPPED implies the incremental-marking flag is on.
104 : State state() const {
105 : DCHECK(state_ == STOPPED || FLAG_incremental_marking);
106 : return state_;
107 : }
108 :
109 : bool should_hurry() const { return should_hurry_; }
110 58816 : void set_should_hurry(bool val) { should_hurry_ = val; }
111 :
112 : bool finalize_marking_completed() const {
113 : return finalize_marking_completed_;
114 : }
115 :
116 : void SetWeakClosureWasOverApproximatedForTesting(bool val) {
117 5 : finalize_marking_completed_ = val;
118 : }
119 :
120 3326536 : inline bool IsStopped() const { return state() == STOPPED; }
121 :
122 83797 : inline bool IsSweeping() const { return state() == SWEEPING; }
123 :
// True in both MARKING and COMPLETE states (relies on enum ordering).
124 92733174 : inline bool IsMarking() const { return state() >= MARKING; }
125 :
126 : inline bool IsMarkingIncomplete() const { return state() == MARKING; }
127 :
128 1226288 : inline bool IsComplete() const { return state() == COMPLETE; }
129 :
130 : inline bool IsReadyToOverApproximateWeakClosure() const {
131 37133 : return request_type_ == FINALIZATION && !finalize_marking_completed_;
132 : }
133 :
134 : inline bool NeedsFinalization() {
135 28355 : return IsMarking() &&
136 2649 : (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
137 : }
138 :
139 : GCRequestType request_type() const { return request_type_; }
140 :
141 18464 : void reset_request_type() { request_type_ = NONE; }
142 :
143 : bool CanBeActivated();
144 :
145 : bool WasActivated();
146 :
147 : void Start(GarbageCollectionReason gc_reason);
148 :
149 : void FinalizeIncrementally();
150 :
// Fix-ups required after a scavenge (young-generation GC) moved or freed
// objects that the marker may still reference.
151 : void UpdateMarkingWorklistAfterScavenge();
152 : void UpdateWeakReferencesAfterScavenge();
153 : void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
154 :
155 : void Hurry();
156 :
157 : void Finalize();
158 :
159 : void Stop();
160 :
161 : void FinalizeMarking(CompletionAction action);
162 :
163 : void MarkingComplete(CompletionAction action);
164 :
165 : void Epilogue();
166 :
167 : // Performs incremental marking steps until deadline_in_ms is reached. It
168 : // returns the remaining time that cannot be used for incremental marking
169 : // anymore because a single step would exceed the deadline.
170 : double AdvanceIncrementalMarking(double deadline_in_ms,
171 : CompletionAction completion_action,
172 : StepOrigin step_origin);
173 :
174 : void FinalizeSweeping();
175 :
176 : size_t Step(size_t bytes_to_process, CompletionAction action,
177 : StepOrigin step_origin);
178 : void StepOnAllocation(size_t bytes_to_process, double max_step_size);
179 :
// Embedder (e.g. Blink wrapper) tracing steps, interleaved with V8 steps.
180 : bool ShouldDoEmbedderStep();
181 : void EmbedderStep(double duration);
182 :
183 : inline void RestartIfNotMarking();
184 :
185 : // {raw_obj} and {slot_address} are raw Address values instead of a
186 : // HeapObject and a MaybeObjectSlot because this is called from
187 : // generated code via ExternalReference.
188 : static int RecordWriteFromCode(Address raw_obj, Address slot_address,
189 : Isolate* isolate);
190 :
191 : // Record a slot for compaction. Returns false for objects that are
192 : // guaranteed to be rescanned or not guaranteed to survive.
193 : //
194 : // No slots in white objects should be recorded, as some slots are typed and
195 : // cannot be interpreted correctly if the underlying object does not survive
196 : // the incremental cycle (stays white).
197 : V8_INLINE bool BaseRecordWrite(HeapObject obj, Object value);
198 : V8_INLINE void RecordWrite(HeapObject obj, ObjectSlot slot, Object value);
199 : V8_INLINE void RecordMaybeWeakWrite(HeapObject obj, MaybeObjectSlot slot,
200 : MaybeObject value);
201 : void RevisitObject(HeapObject obj);
202 : // Ensures that all descriptors in range [0, number_of_own_descriptors)
203 : // are visited.
204 : void VisitDescriptors(HeapObject host, DescriptorArray array,
205 : int number_of_own_descriptors);
206 :
207 : void RecordWriteSlow(HeapObject obj, HeapObjectSlot slot, Object value);
208 : void RecordWriteIntoCode(Code host, RelocInfo* rinfo, HeapObject value);
209 :
210 : // Returns true if the function succeeds in transitioning the object
211 : // from white to grey.
212 : bool WhiteToGreyAndPush(HeapObject obj);
213 :
214 : // This function is used to color the object black before it undergoes an
215 : // unsafe layout change. This is a part of synchronization protocol with
216 : // the concurrent marker.
217 : void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);
218 :
219 4083509 : bool IsCompacting() { return IsMarking() && is_compacting_; }
220 :
// Records how many bytes of a large object were left unscanned when a
// marking step was cut short (see unscanned_bytes_of_large_object_).
221 49237 : void NotifyIncompleteScanOfObject(int unscanned_bytes) {
222 49237 : unscanned_bytes_of_large_object_ = unscanned_bytes;
223 49237 : }
224 :
225 : void ProcessBlackAllocatedObject(HeapObject obj);
226 :
227 60936996 : Heap* heap() const { return heap_; }
228 :
229 : IncrementalMarkingJob* incremental_marking_job() {
230 : return &incremental_marking_job_;
231 : }
232 :
233 : bool black_allocation() { return black_allocation_; }
234 :
235 : void StartBlackAllocationForTesting() {
236 35 : if (!black_allocation_) {
237 0 : StartBlackAllocation();
238 : }
239 : }
240 :
241 62213027 : MarkCompactCollector::MarkingWorklist* marking_worklist() const {
242 62213027 : return marking_worklist_;
243 : }
244 :
245 : void Deactivate();
246 :
247 : private:
// Allocation observer that forwards allocation progress to the owning
// IncrementalMarking (Step() is defined in the .cc file, not visible here).
248 125736 : class Observer : public AllocationObserver {
249 : public:
250 : Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
251 : : AllocationObserver(step_size),
252 125764 : incremental_marking_(incremental_marking) {}
253 :
254 : void Step(int bytes_allocated, Address, size_t) override;
255 :
256 : private:
257 : IncrementalMarking& incremental_marking_;
258 : };
259 :
260 : void StartMarking();
261 :
262 : void StartBlackAllocation();
263 : void PauseBlackAllocation();
264 : void FinishBlackAllocation();
265 :
266 : void MarkRoots();
267 : bool ShouldRetainMap(Map map, int age);
268 : // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
269 : // increase chances of reusing of map transition tree in future.
270 : void RetainMaps();
271 :
272 : void ActivateIncrementalWriteBarrier(PagedSpace* space);
273 : void ActivateIncrementalWriteBarrier(NewSpace* space);
274 : void ActivateIncrementalWriteBarrier();
275 :
276 : void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
277 : void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
278 : void DeactivateIncrementalWriteBarrier();
279 :
280 : V8_INLINE intptr_t ProcessMarkingWorklist(
281 : intptr_t bytes_to_process,
282 : ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
283 :
284 : V8_INLINE bool IsFixedArrayWithProgressBar(HeapObject object);
285 :
286 : // Visits the object and returns its size.
287 : V8_INLINE int VisitObject(Map map, HeapObject obj);
288 :
289 : void IncrementIdleMarkingDelayCounter();
290 :
291 : void AdvanceIncrementalMarkingOnAllocation();
292 :
293 : size_t StepSizeToKeepUpWithAllocations();
294 : size_t StepSizeToMakeProgress();
295 :
// Sole mutator of state_: keeps the Heap's is-marking flag in sync with
// whether we are at or past the MARKING state.
296 : void SetState(State s) {
297 176245 : state_ = s;
298 : heap_->SetIsMarkingFlag(s >= MARKING);
299 : }
300 :
301 : Heap* const heap_;
302 : MarkCompactCollector::MarkingWorklist* const marking_worklist_;
303 : WeakObjects* weak_objects_;
304 :
305 : double start_time_ms_;
306 : size_t initial_old_generation_size_;
307 : size_t old_generation_allocation_counter_;
308 : size_t bytes_allocated_;
309 : size_t bytes_marked_ahead_of_schedule_;
310 : // A sample of concurrent_marking()->TotalMarkedBytes() at the last
311 : // incremental marking step. It is used for updating
312 : // bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
313 : size_t bytes_marked_concurrently_;
// Set by NotifyIncompleteScanOfObject() when a large object's scan was
// interrupted mid-step.
314 : size_t unscanned_bytes_of_large_object_;
315 :
316 : // Must use SetState() above to update state_
317 : State state_;
318 :
319 : bool is_compacting_;
320 : bool should_hurry_;
321 : bool was_activated_;
322 : bool black_allocation_;
323 : bool finalize_marking_completed_;
324 : bool trace_wrappers_toggle_;
325 : IncrementalMarkingJob incremental_marking_job_;
326 :
327 : GCRequestType request_type_;
328 :
329 : Observer new_generation_observer_;
330 : Observer old_generation_observer_;
331 :
332 : MarkingState marking_state_;
333 : AtomicMarkingState atomic_marking_state_;
334 : NonAtomicMarkingState non_atomic_marking_state_;
335 :
336 : DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
337 : };
338 : } // namespace internal
339 : } // namespace v8
340 :
341 : #endif // V8_HEAP_INCREMENTAL_MARKING_H_
|