// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/tracing/trace-event.h"
#include "src/transitions-inl.h"
#include "src/v8.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {

using IncrementalMarkingMarkingVisitor =
    MarkingVisitor<FixedArrayVisitationMode::kIncremental,
                   TraceRetainingPathMode::kDisabled,
                   IncrementalMarking::MarkingState>;
37 :
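// Observer::Step fires from the allocation observers attached in Start():
// roughly every kYoungGenerationAllocatedThreshold bytes allocated in new
// space and every kOldGenerationAllocatedThreshold bytes in old space, a
// marking step runs on the mutator thread.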
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
                                        size_t size) {
  Heap* heap = incremental_marking_.heap();
  VMState<GC> state(heap->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap->isolate(),
      RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
  incremental_marking_.AdvanceOnAllocation();
  // AdvanceOnAllocation can start incremental marking.
  incremental_marking_.EnsureBlackAllocated(addr, size);
}
49 :
IncrementalMarking::IncrementalMarking(
    Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist,
    WeakObjects* weak_objects)
    : heap_(heap),
      marking_worklist_(marking_worklist),
      weak_objects_(weak_objects),
      initial_old_generation_size_(0),
      bytes_marked_(0),
      scheduled_bytes_to_mark_(0),
      schedule_update_time_ms_(0),
      bytes_marked_concurrently_(0),
      is_compacting_(false),
      should_hurry_(false),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      request_type_(NONE),
      new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
      old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
  DCHECK_NOT_NULL(marking_worklist_);
  SetState(STOPPED);
}
72 :
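// Shared slow path of the incremental write barrier. Greys |value| if it is
// still white and tells the caller whether the slot must additionally be
// recorded for the compactor (only needed while compaction is running).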
bool IncrementalMarking::BaseRecordWrite(HeapObject obj, Object value) {
  HeapObject value_heap_obj = HeapObject::cast(value);
  DCHECK(!marking_state()->IsImpossible(value_heap_obj));
  DCHECK(!marking_state()->IsImpossible(obj));
#ifdef V8_CONCURRENT_MARKING
  // The write barrier stub generated with V8_CONCURRENT_MARKING does not
  // check the color of the source object.
  const bool need_recording = true;
#else
  const bool need_recording = marking_state()->IsBlack(obj);
#endif

  if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
    RestartIfNotMarking();
  }
  return is_compacting_ && need_recording;
}

void IncrementalMarking::RecordWriteSlow(HeapObject obj, HeapObjectSlot slot,
                                         Object value) {
  if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
    // The object is not going to be rescanned, so we need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot,
                                                HeapObject::cast(value));
  }
}
99 :
int IncrementalMarking::RecordWriteFromCode(Address raw_obj,
                                            Address slot_address,
                                            Isolate* isolate) {
  HeapObject obj = HeapObject::cast(Object(raw_obj));
  MaybeObjectSlot slot(slot_address);
  isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
                                                               *slot);
  // Called by RecordWriteCodeStubAssembler, which doesn't accept a void
  // return type.
  return 0;
}
110 :
void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
                                             HeapObject value) {
  DCHECK(IsMarking());
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}

bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
  if (marking_state()->WhiteToGrey(obj)) {
    marking_worklist()->Push(obj);
    return true;
  }
  return false;
}

void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
    HeapObject obj) {
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
  marking_state()->WhiteToGrey(obj);
  if (marking_state()->GreyToBlack(obj)) {
    RevisitObject(obj);
  }
}
137 :
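// Left trimming shrinks an object from the start, so the object begins at a
// new address. The mark bits at the new address must be fixed up to black so
// that the object is not lost or re-scanned from a stale address.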
void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
  DCHECK(IsMarking());
  DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
  DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
  DCHECK_NE(from, to);

  MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

  if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
    // Nothing to do if the object is in a black area.
    return;
  }
  MarkBlackAndVisitObjectDueToLayoutChange(from);
  DCHECK(marking_state()->IsBlack(from));
  // Mark the new address as black.
  if (from->address() + kTaggedSize == to->address()) {
    // The old and the new markbits overlap. The |to| object is grey.
    // To make it black, we need to set the second bit.
    DCHECK(new_mark_bit.Get<kAtomicity>());
    new_mark_bit.Next().Set<kAtomicity>();
  } else {
    bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
    DCHECK(success);
    USE(success);
  }
  DCHECK(marking_state()->IsBlack(to));
}
165 :
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitRootPointer(Root root, const char* description,
                        FullObjectSlot p) override {
    MarkObjectByPointer(p);
  }

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override {
    for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(FullObjectSlot p) {
    Object obj = *p;
    if (!obj->IsHeapObject()) return;

    heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
  }

  Heap* heap_;
};
192 :
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  for (Page* p : *space) {
    p->SetOldGenerationPageFlags(false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  for (Page* p : *space) {
    p->SetYoungGenerationPageFlags(false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  for (LargePage* p : *heap_->new_lo_space()) {
    p->SetYoungGenerationPageFlags(false);
    DCHECK(p->IsLargePage());
  }

  for (LargePage* p : *heap_->lo_space()) {
    p->SetOldGenerationPageFlags(false);
  }

  for (LargePage* p : *heap_->code_lo_space()) {
    p->SetOldGenerationPageFlags(false);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  for (Page* p : *space) {
    p->SetOldGenerationPageFlags(true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  for (Page* p : *space) {
    p->SetYoungGenerationPageFlags(true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  for (LargePage* p : *heap_->new_lo_space()) {
    p->SetYoungGenerationPageFlags(true);
    DCHECK(p->IsLargePage());
  }

  for (LargePage* p : *heap_->lo_space()) {
    p->SetOldGenerationPageFlags(true);
  }

  for (LargePage* p : *heap_->code_lo_space()) {
    p->SetOldGenerationPageFlags(true);
  }
}
263 :
264 :
bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::Deactivate() {
  DeactivateIncrementalWriteBarrier();
}
281 :
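// Starts an incremental marking cycle. If sweeping from the previous cycle
// is still in progress, the state is set to SWEEPING and actual marking is
// deferred to FinalizeSweeping().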
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
        "slack %dMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_limit_mb - old_generation_size_mb));
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_marked_ = 0;
  scheduled_bytes_to_mark_ = 0;
  schedule_update_time_ms_ = start_time_ms_;
  bytes_marked_concurrently_ = 0;
  should_hurry_ = false;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start sweeping.\n");
    }
    SetState(SWEEPING);
  }

  heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
                                           &new_generation_observer_);
  incremental_marking_job()->Start(heap_);
}
334 :
335 :
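// Transitions into the MARKING state: starts compaction if allowed, flips
// the page flags that enable the incremental write barrier, marks the strong
// roots grey, and schedules concurrent marking tasks.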
void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }

  is_compacting_ =
      !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();

  SetState(MARKING);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  StartBlackAllocation();

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
    heap_->concurrent_marking()->ScheduleTasks();
  }

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }

  {
    // TracePrologue may call back into V8 in corner cases, requiring that
    // marking (including write barriers) is fully set up.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue();
  }
}
391 :
void IncrementalMarking::StartBlackAllocation() {
  DCHECK(!black_allocation_);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkLinearAllocationAreaBlack();
  heap()->map_space()->MarkLinearAllocationAreaBlack();
  heap()->code_space()->MarkLinearAllocationAreaBlack();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::PauseBlackAllocation() {
  DCHECK(IsMarking());
  heap()->old_space()->UnmarkLinearAllocationArea();
  heap()->map_space()->UnmarkLinearAllocationArea();
  heap()->code_space()->UnmarkLinearAllocationArea();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation paused\n");
  }
  black_allocation_ = false;
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}
426 :
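// Ensures that the object that triggered the observer step is itself black:
// large objects get their mark bit set directly, while on regular pages the
// whole [allocated, allocated + size) range becomes a black area.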
void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
  if (black_allocation() && allocated != kNullAddress) {
    HeapObject object = HeapObject::FromAddress(allocated);
    if (marking_state()->IsWhite(object) && !Heap::InYoungGeneration(object)) {
      if (heap_->IsLargeObject(object)) {
        marking_state()->WhiteToBlack(object);
      } else {
        Page::FromAddress(allocated)->CreateBlackArea(allocated,
                                                      allocated + size);
      }
    }
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}
448 :
bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      marking_state()->IsWhite(HeapObject::cast(constructor))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}
463 :
464 :
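// Ages the entries of heap()->retained_maps() and keeps recently used maps
// alive. A white map is retained (pushed grey) while its age is positive and
// its constructor is alive; the age drops by one whenever the prototype is
// unmarked, so with FLAG_retain_maps_for_n_gc == 2 an otherwise dead map
// survives at most two finalized marking cycles.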
void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it or there is
  // - memory pressure (reduce_memory_footprint_),
  // - GC is requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  WeakArrayList retained_maps = heap()->retained_maps();
  int length = retained_maps->length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    MaybeObject value = retained_maps->Get(i);
    HeapObject map_heap_object;
    if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
      continue;
    }
    int age = retained_maps->Get(i + 1).ToSmi().value();
    int new_age;
    Map map = Map::cast(map_heap_object);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        marking_state()->IsWhite(map)) {
      if (ShouldRetainMap(map, age)) {
        WhiteToGreyAndPush(map);
      }
      Object prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          marking_state()->IsWhite(HeapObject::cast(prototype))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
    }
  }
}
510 :
void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) Age and retain maps embedded in optimized code.
  MarkRoots();

  // Map retaining is needed for performance, not correctness,
  // so we can do it only once at the beginning of the finalization.
  RetainMaps();

  finalize_marking_completed_ = true;

  if (FLAG_trace_incremental_marking) {
    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double delta = end - start;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
  }
}
537 :
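// A scavenge can move objects that already sit on the marking worklist.
// Rewrites every worklist entry to its forwarding address and drops entries
// whose objects died in the young generation.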
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
  if (!IsMarking()) return;

  Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();

#ifdef ENABLE_MINOR_MC
  MinorMarkCompactCollector::MarkingState* minor_marking_state =
      heap()->minor_mark_compact_collector()->marking_state();
#else
  void* minor_marking_state = nullptr;
#endif  // ENABLE_MINOR_MC

  marking_worklist()->Update([
#ifdef DEBUG
                                 // |this| is referenced inside the DCHECKs
                                 // below.
                                 this,
#endif
                                 filler_map, minor_marking_state](
                                 HeapObject obj, HeapObject* out) -> bool {
    DCHECK(obj->IsHeapObject());
    // Only pointers to from space have to be updated.
    if (Heap::InFromPage(obj)) {
      MapWord map_word = obj->map_word();
      if (!map_word.IsForwardingAddress()) {
        // There may be objects on the marking deque that do not exist
        // anymore, e.g. left trimmed objects or objects from the root set
        // (frames). If these objects are dead at scavenging time, their
        // marking deque entries will not point to forwarding addresses.
        // Hence, we can discard them.
        return false;
      }
      HeapObject dest = map_word.ToForwardingAddress();
      DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
      *out = dest;
      return true;
    } else if (Heap::InToPage(obj)) {
      // The object may be on a large page or on a page that was moved in new
      // space.
      DCHECK(Heap::IsLargeObject(obj) ||
             Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
      if (minor_marking_state->IsWhite(obj)) {
        return false;
      }
#endif  // ENABLE_MINOR_MC
      // Either a large object or an object marked by the minor
      // mark-compactor.
      *out = obj;
      return true;
    } else {
      // The object may be on a page that was moved from new to old space.
      // Only applicable during minor MC garbage collections.
      if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
        if (minor_marking_state->IsWhite(obj)) {
          return false;
        }
#endif  // ENABLE_MINOR_MC
        *out = obj;
        return true;
      }
      DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
      // Skip one word filler objects that appear on the
      // stack when we perform an in-place array shift.
      if (obj->map() != filler_map) {
        *out = obj;
        return true;
      }
      return false;
    }
  });

  UpdateWeakReferencesAfterScavenge();
}
611 :
void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
  weak_objects_->weak_references.Update(
      [](std::pair<HeapObject, HeapObjectSlot> slot_in,
         std::pair<HeapObject, HeapObjectSlot>* slot_out) -> bool {
        HeapObject heap_obj = slot_in.first;
        HeapObject forwarded = ForwardingAddress(heap_obj);

        if (!forwarded.is_null()) {
          ptrdiff_t distance_to_slot =
              slot_in.second.address() - slot_in.first.ptr();
          Address new_slot = forwarded.ptr() + distance_to_slot;
          slot_out->first = forwarded;
          slot_out->second = HeapObjectSlot(new_slot);
          return true;
        }

        return false;
      });
  weak_objects_->weak_objects_in_code.Update(
      [](std::pair<HeapObject, Code> slot_in,
         std::pair<HeapObject, Code>* slot_out) -> bool {
        HeapObject heap_obj = slot_in.first;
        HeapObject forwarded = ForwardingAddress(heap_obj);

        if (!forwarded.is_null()) {
          slot_out->first = forwarded;
          slot_out->second = slot_in.second;
          return true;
        }

        return false;
      });
  weak_objects_->ephemeron_hash_tables.Update(
      [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
        EphemeronHashTable forwarded = ForwardingAddress(slot_in);

        if (!forwarded.is_null()) {
          *slot_out = forwarded;
          return true;
        }

        return false;
      });

  auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
    HeapObject key = slot_in.key;
    HeapObject value = slot_in.value;
    HeapObject forwarded_key = ForwardingAddress(key);
    HeapObject forwarded_value = ForwardingAddress(value);

    if (!forwarded_key.is_null() && !forwarded_value.is_null()) {
      *slot_out = Ephemeron{forwarded_key, forwarded_value};
      return true;
    }

    return false;
  };

  weak_objects_->current_ephemerons.Update(ephemeron_updater);
  weak_objects_->next_ephemerons.Update(ephemeron_updater);
  weak_objects_->discovered_ephemerons.Update(ephemeron_updater);

  weak_objects_->flushed_js_functions.Update(
      [](JSFunction slot_in, JSFunction* slot_out) -> bool {
        JSFunction forwarded = ForwardingAddress(slot_in);

        if (!forwarded.is_null()) {
          *slot_out = forwarded;
          return true;
        }

        return false;
      });
#ifdef DEBUG
  weak_objects_->bytecode_flushing_candidates.Iterate(
      [](SharedFunctionInfo candidate) {
        DCHECK(!Heap::InYoungGeneration(candidate));
      });
#endif
}
692 :
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
    size_t dead_bytes_in_new_space) {
  if (!IsMarking()) return;
  bytes_marked_ -= Min(bytes_marked_, dead_bytes_in_new_space);
}

bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject obj) {
  if (!obj->IsFixedArray()) return false;
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
  return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
}
704 :
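// Visits the body of a grey object, turning it black. The object's map is
// kept alive via WhiteToGreyAndPush(); the return value is the number of
// bytes visited, which is charged against the current step's budget.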
int IncrementalMarking::VisitObject(Map map, HeapObject obj) {
  DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
  if (!marking_state()->GreyToBlack(obj)) {
    // The object can already be black in these cases:
    // 1. The object is a fixed array with the progress bar.
    // 2. The object is a JSObject that was colored black before an
    //    unsafe layout change.
    // 3. The object is a string that was colored black before an
    //    unsafe layout change.
    // 4. The object was materialized by the deoptimizer.
    // 5. The object is a descriptor array marked black by
    //    the descriptor array marking barrier.
    DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
           obj->IsFixedArray() || obj->IsContext() || obj->IsJSObject() ||
           obj->IsString() || obj->IsDescriptorArray());
  }
  DCHECK(marking_state()->IsBlack(obj));
  WhiteToGreyAndPush(map);
  IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
                                           marking_state());
  return visitor.Visit(map, obj);
}
727 :
void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
  if (IsMarking() && marking_state()->IsBlack(obj)) {
    RevisitObject(obj);
  }
}

void IncrementalMarking::RevisitObject(HeapObject obj) {
  DCHECK(IsMarking());
  DCHECK(marking_state()->IsBlack(obj));
  Page* page = Page::FromHeapObject(obj);
  if (page->owner()->identity() == LO_SPACE ||
      page->owner()->identity() == NEW_LO_SPACE) {
    page->ResetProgressBar();
  }
  Map map = obj->map();
  WhiteToGreyAndPush(map);
  IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
                                           marking_state());
  visitor.Visit(map, obj);
}

void IncrementalMarking::VisitDescriptors(HeapObject host,
                                          DescriptorArray descriptors,
                                          int number_of_own_descriptors) {
  IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
                                           marking_state());
  // This is necessary because the Scavenger records slots only for the
  // promoted black objects and the marking visitor of DescriptorArray skips
  // the descriptors marked by the visitor.VisitDescriptors() below.
  visitor.MarkDescriptorArrayBlack(host, descriptors);
  visitor.VisitDescriptors(descriptors, number_of_own_descriptors);
}
760 :
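// Drains the marking worklist until at least |bytes_to_process| bytes of
// objects have been visited, or completely when FORCE_COMPLETION is given.
// Returns the number of bytes actually processed.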
intptr_t IncrementalMarking::ProcessMarkingWorklist(
    intptr_t bytes_to_process, ForceCompletionAction completion) {
  intptr_t bytes_processed = 0;
  while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
    HeapObject obj = marking_worklist()->Pop();
    if (obj.is_null()) break;
    // Left trimming may result in white, grey, or black filler objects on the
    // marking deque. Ignore these objects.
    if (obj->IsFiller()) {
      DCHECK(!marking_state()->IsImpossible(obj));
      continue;
    }
    bytes_processed += VisitObject(obj->map(), obj);
  }
  return bytes_processed;
}
777 :
StepResult IncrementalMarking::EmbedderStep(double duration_ms) {
  if (!ShouldDoEmbedderStep()) return StepResult::kNoImmediateWork;

  constexpr size_t kObjectsToProcessBeforeInterrupt = 500;

  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
  double deadline = heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
  bool empty_worklist;
  do {
    {
      LocalEmbedderHeapTracer::ProcessingScope scope(
          heap_->local_embedder_heap_tracer());
      HeapObject object;
      size_t cnt = 0;
      empty_worklist = true;
      while (marking_worklist()->embedder()->Pop(0, &object)) {
        scope.TracePossibleWrapper(JSObject::cast(object));
        if (++cnt == kObjectsToProcessBeforeInterrupt) {
          cnt = 0;
          empty_worklist = false;
          break;
        }
      }
    }
    heap_->local_embedder_heap_tracer()->Trace(deadline);
  } while (!empty_worklist &&
           (heap_->MonotonicallyIncreasingTimeInMs() < deadline));
  heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist);
  return empty_worklist ? StepResult::kNoImmediateWork
                        : StepResult::kMoreWorkRemaining;
}
809 :
void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!marking_worklist()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingWorklist(0, FORCE_COMPLETION);
    SetState(COMPLETE);
    if (FLAG_trace_incremental_marking) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
          static_cast<int>(delta));
    }
  }
}
839 :
840 :
void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  IncrementalMarking::set_should_hurry(false);
  heap_->isolate()->stack_guard()->ClearGC();
  SetState(STOPPED);
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}
877 :
878 :
void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  SetState(COMPLETE);
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then that shouldn't make us do a scavenge and keep being incremental, so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
}

bool IncrementalMarking::ShouldDoEmbedderStep() {
  return state_ == MARKING && FLAG_incremental_marking_wrappers &&
         heap_->local_embedder_heap_tracer()->InUse();
}

void IncrementalMarking::FastForwardSchedule() {
  if (scheduled_bytes_to_mark_ < bytes_marked_) {
    scheduled_bytes_to_mark_ = bytes_marked_;
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Fast-forwarded schedule\n");
    }
  }
}
931 :
void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
  // Consider marking close to finalization if 75% of the initial old
  // generation was marked.
  if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
    FastForwardSchedule();
  }
}
939 :
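// Schedules marking work so that, at a steady cadence, the entire initial
// old generation would be marked within kTargetMarkingWallTimeInMs. For
// example, with a 100MB initial old generation, a 50ms delta schedules
// (50 / 500) * 100MB = 10MB of marking work.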
void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
  // Time interval that should be sufficient to complete incremental marking.
  constexpr double kTargetMarkingWallTimeInMs = 500;
  constexpr double kMinTimeBetweenScheduleInMs = 10;
  if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
  double delta_ms =
      Min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
  schedule_update_time_ms_ = time_ms;

  size_t bytes_to_mark =
      (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %" PRIuS
        "KB to mark based on time delta %.1fms\n",
        bytes_to_mark / KB, delta_ms);
  }
}

namespace {
StepResult CombineStepResults(StepResult a, StepResult b) {
  if (a == StepResult::kMoreWorkRemaining ||
      b == StepResult::kMoreWorkRemaining)
    return StepResult::kMoreWorkRemaining;
  if (a == StepResult::kWaitingForFinalization ||
      b == StepResult::kWaitingForFinalization)
    return StepResult::kWaitingForFinalization;
  return StepResult::kNoImmediateWork;
}
}  // anonymous namespace
972 :
StepResult IncrementalMarking::AdvanceWithDeadline(
    double deadline_in_ms, CompletionAction completion_action,
    StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  DCHECK(!IsStopped());

  ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
  FastForwardScheduleIfCloseToFinalization();

  double remaining_time_in_ms = 0.0;
  StepResult result;
  do {
    StepResult v8_result =
        V8Step(kStepSizeInMs / 2, completion_action, step_origin);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
    StepResult embedder_result =
        EmbedderStep(Min(kStepSizeInMs, remaining_time_in_ms));
    result = CombineStepResults(v8_result, embedder_result);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >= kStepSizeInMs &&
           result == StepResult::kMoreWorkRemaining);
  return result;
}

void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (!FLAG_concurrent_sweeping ||
       !heap_->mark_compact_collector()->sweeper()->AreSweeperTasksRunning())) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
#ifdef DEBUG
    heap_->VerifyCountersAfterSweeping();
#endif
    StartMarking();
  }
}
1016 :
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Update bytes_allocated_ based on the allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  size_t result = current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return result;
}
1024 :
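// The fixed per-step amount of marking work: 1/256th of the initial old
// generation, clamped to [kMinStepSizeInBytes, 256KB]. If the old generation
// can no longer grow by the new-space capacity plus 64MB of slack, the step
// grows to 1/32nd of the current old generation size.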
size_t IncrementalMarking::StepSizeToMakeProgress() {
  const size_t kTargetStepCount = 256;
  const size_t kTargetStepCountAtOOM = 32;
  const size_t kMaxStepSizeInByte = 256 * KB;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (!heap()->CanExpandOldGeneration(oom_slack)) {
    return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
  }

  return Min(Max(initial_old_generation_size_ / kTargetStepCount,
                 IncrementalMarking::kMinStepSizeInBytes),
             kMaxStepSizeInByte);
}

void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
  if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
    // The overflow case.
    scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
  } else {
    scheduled_bytes_to_mark_ += bytes_to_mark;
  }
}

void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
  size_t progress_bytes = StepSizeToMakeProgress();
  size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
  size_t bytes_to_mark = progress_bytes + allocation_bytes;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %" PRIuS
        "KB to mark based on allocation (progress="
        "%" PRIuS "KB, allocation=%" PRIuS "KB)\n",
        bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
  }
}
1063 :
void IncrementalMarking::FetchBytesMarkedConcurrently() {
  if (FLAG_concurrent_marking) {
    size_t current_bytes_marked_concurrently =
        heap()->concurrent_marking()->TotalMarkedBytes();
    // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
    // short period of time when a concurrent marking task is finishing.
    if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
      bytes_marked_ +=
          current_bytes_marked_concurrently - bytes_marked_concurrently_;
      bytes_marked_concurrently_ = current_bytes_marked_concurrently;
    }
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marked %" PRIuS "KB on background threads\n",
          heap_->concurrent_marking()->TotalMarkedBytes() / KB);
    }
  }
}

size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
  FetchBytesMarkedConcurrently();
  if (FLAG_trace_incremental_marking) {
    if (scheduled_bytes_to_mark_ > bytes_marked_) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %" PRIuS "KB behind schedule\n",
          (scheduled_bytes_to_mark_ - bytes_marked_) / KB);
    } else {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %" PRIuS "KB ahead of schedule\n",
          (bytes_marked_ - scheduled_bytes_to_mark_) / KB);
    }
  }
  // Allow steps on allocation to get behind the schedule by a small amount.
  // This gives higher priority to steps in tasks.
  size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
  if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
    return 0;
  return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
}
1103 :
void IncrementalMarking::AdvanceOnAllocation() {
  // Code using an AlwaysAllocateScope assumes that the GC state does not
  // change; that implies that no marking steps must be performed.
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
    return;
  }
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  ScheduleBytesToMarkBasedOnAllocation();
  V8Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
1118 :
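// The core marking step: finalizes sweeping if needed, computes a step size
// from the schedule, drains the marking worklist accordingly, and requests
// finalization or completion once the worklist runs empty.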
StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
                                      CompletionAction action,
                                      StepOrigin step_origin) {
  StepResult result = StepResult::kMoreWorkRemaining;
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

  size_t bytes_processed = 0, bytes_to_process = 0;
  if (state_ == MARKING) {
    if (FLAG_concurrent_marking) {
      heap_->new_space()->ResetOriginalTop();
      heap_->new_lo_space()->ResetPendingObject();
      // It is safe to merge back all objects that were on hold to the shared
      // work list at Step because we are at a safepoint where all objects
      // are properly initialized.
      marking_worklist()->shared()->MergeGlobalPool(
          marking_worklist()->on_hold());
    }

// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
    if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
        FLAG_trace_gc_verbose) {
      marking_worklist()->Print();
    }
#endif
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marking speed %.fKB/ms\n",
          heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    }
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        max_step_size_in_ms,
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
    if (bytes_to_process == 0) {
      result = StepResult::kNoImmediateWork;
    }

    bytes_processed =
        ProcessMarkingWorklist(Max(bytes_to_process, kMinStepSizeInBytes));

    bytes_marked_ += bytes_processed;

    if (marking_worklist()->IsEmpty()) {
      result = StepResult::kNoImmediateWork;
      if (heap_->local_embedder_heap_tracer()
              ->ShouldFinalizeIncrementalMarking()) {
        if (!finalize_marking_completed_) {
          FinalizeMarking(action);
          FastForwardSchedule();
          result = StepResult::kWaitingForFinalization;
          incremental_marking_job()->Start(heap_);
        } else {
          MarkingComplete(action);
          result = StepResult::kWaitingForFinalization;
        }
      } else {
        heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
      }
    }
  }
  if (FLAG_concurrent_marking) {
    marking_worklist()->ShareWorkIfGlobalPoolIsEmpty();
    heap_->concurrent_marking()->RescheduleTasksIfNeeded();
  }

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double duration = (end - start);
  // Note that we report zero bytes here when sweeping was in progress or
  // when we just started incremental marking. In these cases we did not
  // process the marking deque.
  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s %" PRIuS "KB (%" PRIuS "KB) in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
        bytes_processed / KB, bytes_to_process / KB, duration);
  }
  return result;
}

}  // namespace internal
}  // namespace v8
|