Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_INCREMENTAL_MARKING_H_
6 : #define V8_HEAP_INCREMENTAL_MARKING_H_
7 :
8 : #include "src/cancelable-task.h"
9 : #include "src/execution.h"
10 : #include "src/heap/heap.h"
11 : #include "src/heap/incremental-marking-job.h"
12 : #include "src/heap/mark-compact.h"
13 : #include "src/heap/spaces.h"
14 : #include "src/objects.h"
15 :
16 : namespace v8 {
17 : namespace internal {
18 :
19 : // Forward declarations.
20 : class MarkBit;
21 : class PagedSpace;
22 :
// Origin of an incremental marking step: kV8 for steps triggered from
// regular V8 execution (e.g. allocation), kTask for steps run from a
// posted task (see IncrementalMarkingJob).
enum class StepOrigin { kV8, kTask };
24 :
// Drives old-generation marking in small steps interleaved with mutator
// execution, so the marking work of a full GC is amortized over allocation.
// Steps are triggered by allocation observers (see Observer below) and by
// the IncrementalMarkingJob task.
class V8_EXPORT_PRIVATE IncrementalMarking {
 public:
  // Marker life cycle. NOTE(review): the DCHECK in state() implies any state
  // other than STOPPED requires FLAG_incremental_marking to be enabled.
  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

  // Whether completing marking may request a GC through the stack guard.
  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };

  // Kind of GC work this marker is currently requesting, if any.
  enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };

  static void MarkGrey(Heap* heap, HeapObject* object);

  static void MarkBlack(HeapObject* object, int size);

  // Transfers mark bits without requiring proper object headers.
  static void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);

  // Transfers color including live byte count, requiring properly set up
  // objects.
  template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
  V8_INLINE static void TransferColor(HeapObject* from, HeapObject* to) {
    if (ObjectMarking::IsBlack<access_mode>(to, MarkingState::Internal(to))) {
      // |to| may only be black already if it was allocated black.
      DCHECK(to->GetHeap()->incremental_marking()->black_allocation());
      return;
    }

    DCHECK(ObjectMarking::IsWhite<access_mode>(to, MarkingState::Internal(to)));
    // Mirror |from|'s color onto the white |to|: grey -> grey, black ->
    // black. A white |from| leaves |to| white (no transition performed).
    if (ObjectMarking::IsGrey<access_mode>(from,
                                           MarkingState::Internal(from))) {
      ObjectMarking::WhiteToGrey<access_mode>(to, MarkingState::Internal(to));
    } else if (ObjectMarking::IsBlack<access_mode>(
                   from, MarkingState::Internal(from))) {
      ObjectMarking::WhiteToBlack<access_mode>(to, MarkingState::Internal(to));
    }
  }

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  State state() {
    // Any state other than STOPPED implies incremental marking is enabled.
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  bool finalize_marking_completed() const {
    return finalize_marking_completed_;
  }

  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
    finalize_marking_completed_ = val;
  }

  inline bool IsStopped() { return state() == STOPPED; }

  inline bool IsSweeping() { return state() == SWEEPING; }

  // True in both MARKING and COMPLETE states (relies on enum ordering).
  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  // Finalization has been requested but not yet performed.
  inline bool IsReadyToOverApproximateWeakClosure() const {
    return request_type_ == FINALIZATION && !finalize_marking_completed_;
  }

  inline bool NeedsFinalization() {
    return IsMarking() &&
           (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
  }

  GCRequestType request_type() const { return request_type_; }

  void reset_request_type() { request_type_ = NONE; }

  bool CanBeActivated();

  bool WasActivated();

  void Start(GarbageCollectionReason gc_reason);

  void FinalizeIncrementally();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Stop();

  void FinalizeMarking(CompletionAction action);

  void MarkingComplete(CompletionAction action);

  void Epilogue();

  // Performs incremental marking steps until deadline_in_ms is reached. It
  // returns the remaining time that cannot be used for incremental marking
  // anymore because a single step would exceed the deadline.
  double AdvanceIncrementalMarking(double deadline_in_ms,
                                   CompletionAction completion_action,
                                   ForceCompletionAction force_completion,
                                   StepOrigin step_origin);

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that many
  // heavy (color-checking) write barriers have been invoked.
  static const size_t kAllocatedThreshold = 64 * KB;

  // Per-step time budget bounds, in milliseconds.
  static const int kStepSizeInMs = 1;
  static const int kMaxStepSizeInMs = 5;

  // This is the upper bound for how many times we allow finalization of
  // incremental marking to be postponed.
  static const int kMaxIdleMarkingDelayCounter = 3;

#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // In debug builds activate immediately to exercise incremental marking
  // as often as possible.
  static const intptr_t kActivationThreshold = 0;
#endif

  void FinalizeSweeping();

  size_t Step(size_t bytes_to_process, CompletionAction action,
              ForceCompletionAction completion, StepOrigin step_origin);

  inline void RestartIfNotMarking();

  // Write-barrier entry points called from generated code.
  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                  Isolate* isolate);

  static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot,
                                             Isolate* isolate);

  // Record a slot for compaction. Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                     Code* value));

  // Out-of-line slow paths for the barriers above.
  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
  void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);

  void WhiteToGreyAndPush(HeapObject* obj);

  // Refreshes the chunk's flags from the current marking/compacting state.
  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(Page* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  // Compaction is only meaningful while marking is active.
  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  // Records how many bytes of a large object were left unscanned so the
  // next step can resume there.
  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }

  void ClearIdleMarkingDelayCounter();

  bool IsIdleMarkingDelayCounterLimitReached();

  void IterateBlackObject(HeapObject* object);

  Heap* heap() const { return heap_; }

  IncrementalMarkingJob* incremental_marking_job() {
    return &incremental_marking_job_;
  }

  // Whether new objects are currently allocated directly as black.
  bool black_allocation() { return black_allocation_; }

  void StartBlackAllocationForTesting() { StartBlackAllocation(); }

  void AbortBlackAllocation();

 private:
  // Allocation observer that advances incremental marking every time the
  // configured amount of memory has been allocated.
  class Observer : public AllocationObserver {
   public:
    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
        : AllocationObserver(step_size),
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override {
      incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
    }

   private:
    IncrementalMarking& incremental_marking_;
  };

  int64_t SpaceLeftInOldSpace();

  void StartMarking();

  void StartBlackAllocation();
  void FinishBlackAllocation();

  void MarkRoots();
  void ProcessWeakCells();
  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase chances of reusing of map transition tree in future.
  void RetainMaps();

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);

  INLINE(void ProcessMarkingDeque());

  INLINE(intptr_t ProcessMarkingDeque(
      intptr_t bytes_to_process,
      ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  void IncrementIdleMarkingDelayCounter();

  void AdvanceIncrementalMarkingOnAllocation();

  // Step-size heuristics; presumably derived from allocation rate and
  // schedule — see the .cc for the actual formulas (TODO confirm).
  size_t StepSizeToKeepUpWithAllocations();
  size_t StepSizeToMakeProgress();

  Heap* heap_;

  double start_time_ms_;
  size_t initial_old_generation_size_;
  size_t old_generation_allocation_counter_;
  size_t bytes_allocated_;
  size_t bytes_marked_ahead_of_schedule_;
  // Set by NotifyIncompleteScanOfObject(); remaining bytes of a partially
  // scanned large object.
  size_t unscanned_bytes_of_large_object_;

  State state_;

  int idle_marking_delay_counter_;
  int incremental_marking_finalization_rounds_;

  bool is_compacting_;
  bool should_hurry_;
  bool was_activated_;
  bool black_allocation_;
  bool finalize_marking_completed_;
  bool trace_wrappers_toggle_;
  IncrementalMarkingJob incremental_marking_job_;

  GCRequestType request_type_;

  Observer new_generation_observer_;
  Observer old_generation_observer_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
307 : } // namespace internal
308 : } // namespace v8
309 :
310 : #endif // V8_HEAP_INCREMENTAL_MARKING_H_
|