Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/mark-compact.h"
6 :
7 : #include <unordered_map>
8 :
9 : #include "src/base/utils/random-number-generator.h"
10 : #include "src/cancelable-task.h"
11 : #include "src/compilation-cache.h"
12 : #include "src/deoptimizer.h"
13 : #include "src/execution.h"
14 : #include "src/frames-inl.h"
15 : #include "src/global-handles.h"
16 : #include "src/heap/array-buffer-collector.h"
17 : #include "src/heap/array-buffer-tracker-inl.h"
18 : #include "src/heap/gc-tracer.h"
19 : #include "src/heap/incremental-marking-inl.h"
20 : #include "src/heap/invalidated-slots-inl.h"
21 : #include "src/heap/item-parallel-job.h"
22 : #include "src/heap/local-allocator-inl.h"
23 : #include "src/heap/mark-compact-inl.h"
24 : #include "src/heap/object-stats.h"
25 : #include "src/heap/objects-visiting-inl.h"
26 : #include "src/heap/spaces-inl.h"
27 : #include "src/heap/sweeper.h"
28 : #include "src/heap/worklist.h"
29 : #include "src/ic/stub-cache.h"
30 : #include "src/objects/foreign.h"
31 : #include "src/objects/hash-table-inl.h"
32 : #include "src/objects/js-objects-inl.h"
33 : #include "src/objects/maybe-object.h"
34 : #include "src/objects/slots-inl.h"
35 : #include "src/transitions-inl.h"
36 : #include "src/utils-inl.h"
37 : #include "src/v8.h"
38 : #include "src/vm-state-inl.h"
39 :
40 : namespace v8 {
41 : namespace internal {
42 :
43 : const char* Marking::kWhiteBitPattern = "00";
44 : const char* Marking::kBlackBitPattern = "11";
45 : const char* Marking::kGreyBitPattern = "10";
46 : const char* Marking::kImpossibleBitPattern = "01";
47 :
48 : // The following has to hold in order for {MarkingState::MarkBitFrom} to not
49              : // produce an invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
50 : STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
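// Editor's note: the block below is an illustrative sketch added for clarity;
// it is not part of the original mark-compact.cc. An object's colour is read
// from two consecutive mark bits: the bit of its first tagged word and the
// bit of the following word. All names in the sketch are hypothetical.
namespace example {
enum class Colour { kWhite, kGrey, kBlack, kImpossible };
// Decodes the (first bit, second bit) pair into a colour, mirroring the
// patterns above: "00" white, "10" grey, "11" black, "01" impossible.
constexpr Colour Decode(bool first_bit, bool second_bit) {
  return first_bit ? (second_bit ? Colour::kBlack : Colour::kGrey)
                   : (second_bit ? Colour::kImpossible : Colour::kWhite);
}
static_assert(Decode(false, false) == Colour::kWhite, "pattern 00");
static_assert(Decode(true, false) == Colour::kGrey, "pattern 10");
static_assert(Decode(true, true) == Colour::kBlack, "pattern 11");
}  // namespace example
// Because every object spans at least two tagged words (the STATIC_ASSERT
// above), the second bit always belongs to the same object as the first, so
// two adjacent, independently marked objects can never produce the "01"
// (impossible) pattern.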
51 :
52 : // =============================================================================
53 : // Verifiers
54 : // =============================================================================
55 :
56 : #ifdef VERIFY_HEAP
57 : namespace {
58 :
59 : class MarkingVerifier : public ObjectVisitor, public RootVisitor {
60 : public:
61 : virtual void Run() = 0;
62 :
63 : protected:
64 : explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
65 :
66 : virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
67 :
68 : virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
69 : virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
70 : virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
71 :
72 : virtual bool IsMarked(HeapObject object) = 0;
73 :
74 : virtual bool IsBlackOrGrey(HeapObject object) = 0;
75 :
76 : void VisitPointers(HeapObject host, ObjectSlot start,
77 : ObjectSlot end) override {
78 : VerifyPointers(start, end);
79 : }
80 :
81 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
82 : MaybeObjectSlot end) override {
83 : VerifyPointers(start, end);
84 : }
85 :
86 : void VisitRootPointers(Root root, const char* description,
87 : FullObjectSlot start, FullObjectSlot end) override {
88 : VerifyRootPointers(start, end);
89 : }
90 :
91 : void VerifyRoots(VisitMode mode);
92 : void VerifyMarkingOnPage(const Page* page, Address start, Address end);
93 : void VerifyMarking(NewSpace* new_space);
94 : void VerifyMarking(PagedSpace* paged_space);
95 : void VerifyMarking(LargeObjectSpace* lo_space);
96 :
97 : Heap* heap_;
98 : };
99 :
100 : void MarkingVerifier::VerifyRoots(VisitMode mode) {
101 : heap_->IterateStrongRoots(this, mode);
102 : }
103 :
104 : void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
105 : Address end) {
106 : HeapObject object;
107 : Address next_object_must_be_here_or_later = start;
108 : for (Address current = start; current < end;) {
109 : object = HeapObject::FromAddress(current);
110 : // One word fillers at the end of a black area can be grey.
111 : if (IsBlackOrGrey(object) &&
112 : object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
113 : CHECK(IsMarked(object));
114 : CHECK(current >= next_object_must_be_here_or_later);
115 : object->Iterate(this);
116 : next_object_must_be_here_or_later = current + object->Size();
117 : // The object is either part of a black area of black allocation or a
118              : // regular black object.
119 : CHECK(
120 : bitmap(page)->AllBitsSetInRange(
121 : page->AddressToMarkbitIndex(current),
122 : page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
123 : bitmap(page)->AllBitsClearInRange(
124 : page->AddressToMarkbitIndex(current + kTaggedSize * 2),
125 : page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
126 : current = next_object_must_be_here_or_later;
127 : } else {
128 : current += kTaggedSize;
129 : }
130 : }
131 : }
132 :
133 : void MarkingVerifier::VerifyMarking(NewSpace* space) {
134 : Address end = space->top();
135 : // The bottom position is at the start of its page. Allows us to use
136 : // page->area_start() as start of range on all pages.
137 : CHECK_EQ(space->first_allocatable_address(),
138 : space->first_page()->area_start());
139 :
140 : PageRange range(space->first_allocatable_address(), end);
141 : for (auto it = range.begin(); it != range.end();) {
142 : Page* page = *(it++);
143 : Address limit = it != range.end() ? page->area_end() : end;
144 : CHECK(limit == end || !page->Contains(end));
145 : VerifyMarkingOnPage(page, page->area_start(), limit);
146 : }
147 : }
148 :
149 : void MarkingVerifier::VerifyMarking(PagedSpace* space) {
150 : for (Page* p : *space) {
151 : VerifyMarkingOnPage(p, p->area_start(), p->area_end());
152 : }
153 : }
154 :
155 : void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
156 : LargeObjectIterator it(lo_space);
157 : for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
158 : if (IsBlackOrGrey(obj)) {
159 : obj->Iterate(this);
160 : }
161 : }
162 : }
163 :
164 : class FullMarkingVerifier : public MarkingVerifier {
165 : public:
166 : explicit FullMarkingVerifier(Heap* heap)
167 : : MarkingVerifier(heap),
168 : marking_state_(
169 : heap->mark_compact_collector()->non_atomic_marking_state()) {}
170 :
171 : void Run() override {
172 : VerifyRoots(VISIT_ONLY_STRONG);
173 : VerifyMarking(heap_->new_space());
174 : VerifyMarking(heap_->old_space());
175 : VerifyMarking(heap_->code_space());
176 : VerifyMarking(heap_->map_space());
177 : VerifyMarking(heap_->lo_space());
178 : VerifyMarking(heap_->code_lo_space());
179 : }
180 :
181 : protected:
182 : Bitmap* bitmap(const MemoryChunk* chunk) override {
183 : return marking_state_->bitmap(chunk);
184 : }
185 :
186 : bool IsMarked(HeapObject object) override {
187 : return marking_state_->IsBlack(object);
188 : }
189 :
190 : bool IsBlackOrGrey(HeapObject object) override {
191 : return marking_state_->IsBlackOrGrey(object);
192 : }
193 :
194 : void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
195 : VerifyPointersImpl(start, end);
196 : }
197 :
198 : void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
199 : VerifyPointersImpl(start, end);
200 : }
201 :
202 : void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
203 : VerifyPointersImpl(start, end);
204 : }
205 :
206 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
207 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
208 : VerifyHeapObjectImpl(target);
209 : }
210 :
211 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
212 : DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
213 : if (!host->IsWeakObject(rinfo->target_object())) {
214 : HeapObject object = rinfo->target_object();
215 : VerifyHeapObjectImpl(object);
216 : }
217 : }
218 :
219 : private:
220 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
221 : CHECK(marking_state_->IsBlackOrGrey(heap_object));
222 : }
223 :
224 : template <typename TSlot>
225 : V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
226 : for (TSlot slot = start; slot < end; ++slot) {
227 : typename TSlot::TObject object = *slot;
228 : HeapObject heap_object;
229 : if (object.GetHeapObjectIfStrong(&heap_object)) {
230 : VerifyHeapObjectImpl(heap_object);
231 : }
232 : }
233 : }
234 :
235 : MarkCompactCollector::NonAtomicMarkingState* marking_state_;
236 : };
237 :
238 : class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
239 : public:
240 : virtual void Run() = 0;
241 :
242 : void VisitPointers(HeapObject host, ObjectSlot start,
243 : ObjectSlot end) override {
244 : VerifyPointers(start, end);
245 : }
246 :
247 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
248 : MaybeObjectSlot end) override {
249 : VerifyPointers(start, end);
250 : }
251 :
252 : void VisitRootPointers(Root root, const char* description,
253 : FullObjectSlot start, FullObjectSlot end) override {
254 : VerifyRootPointers(start, end);
255 : }
256 :
257 : protected:
258 : explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
259 :
260 : inline Heap* heap() { return heap_; }
261 :
262 : virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
263 : virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
264 : virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
265 :
266 : void VerifyRoots(VisitMode mode);
267 : void VerifyEvacuationOnPage(Address start, Address end);
268 : void VerifyEvacuation(NewSpace* new_space);
269 : void VerifyEvacuation(PagedSpace* paged_space);
270 :
271 : Heap* heap_;
272 : };
273 :
274 : void EvacuationVerifier::VerifyRoots(VisitMode mode) {
275 : heap_->IterateStrongRoots(this, mode);
276 : }
277 :
278 : void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
279 : Address current = start;
280 : while (current < end) {
281 : HeapObject object = HeapObject::FromAddress(current);
282 : if (!object->IsFiller()) object->Iterate(this);
283 : current += object->Size();
284 : }
285 : }
286 :
287 : void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
288 : PageRange range(space->first_allocatable_address(), space->top());
289 : for (auto it = range.begin(); it != range.end();) {
290 : Page* page = *(it++);
291 : Address current = page->area_start();
292 : Address limit = it != range.end() ? page->area_end() : space->top();
293 : CHECK(limit == space->top() || !page->Contains(space->top()));
294 : VerifyEvacuationOnPage(current, limit);
295 : }
296 : }
297 :
298 : void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
299 : for (Page* p : *space) {
300 : if (p->IsEvacuationCandidate()) continue;
301 : if (p->Contains(space->top())) {
302 : CodePageMemoryModificationScope memory_modification_scope(p);
303 : heap_->CreateFillerObjectAt(
304 : space->top(), static_cast<int>(space->limit() - space->top()),
305 : ClearRecordedSlots::kNo);
306 : }
307 : VerifyEvacuationOnPage(p->area_start(), p->area_end());
308 : }
309 : }
310 :
311 : class FullEvacuationVerifier : public EvacuationVerifier {
312 : public:
313 : explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
314 :
315 : void Run() override {
316 : VerifyRoots(VISIT_ALL);
317 : VerifyEvacuation(heap_->new_space());
318 : VerifyEvacuation(heap_->old_space());
319 : VerifyEvacuation(heap_->code_space());
320 : VerifyEvacuation(heap_->map_space());
321 : }
322 :
323 : protected:
324 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
325 : CHECK_IMPLIES(Heap::InNewSpace(heap_object), Heap::InToSpace(heap_object));
326 : CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
327 : }
328 :
329 : template <typename TSlot>
330 : void VerifyPointersImpl(TSlot start, TSlot end) {
331 : for (TSlot current = start; current < end; ++current) {
332 : typename TSlot::TObject object = *current;
333 : HeapObject heap_object;
334 : if (object.GetHeapObjectIfStrong(&heap_object)) {
335 : VerifyHeapObjectImpl(heap_object);
336 : }
337 : }
338 : }
339 :
340 : void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
341 : VerifyPointersImpl(start, end);
342 : }
343 : void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
344 : VerifyPointersImpl(start, end);
345 : }
346 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
347 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
348 : VerifyHeapObjectImpl(target);
349 : }
350 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
351 : VerifyHeapObjectImpl(rinfo->target_object());
352 : }
353 : void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
354 : VerifyPointersImpl(start, end);
355 : }
356 : };
357 :
358 : } // namespace
359 : #endif // VERIFY_HEAP
360 :
361 : // =============================================================================
362 : // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
363 : // =============================================================================
364 :
365 : using MarkCompactMarkingVisitor =
366 : MarkingVisitor<FixedArrayVisitationMode::kRegular,
367 : TraceRetainingPathMode::kEnabled,
368 : MarkCompactCollector::MarkingState>;
369 :
370 : namespace {
371 :
372 320528 : int NumberOfAvailableCores() {
373 320528 : static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
374 : // This number of cores should be greater than zero and never change.
375 : DCHECK_GE(num_cores, 1);
376 : DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
377 320528 : return num_cores;
378 : }
379 :
380 : } // namespace
381 :
382 70969 : int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
383 : DCHECK_GT(pages, 0);
384 : int tasks =
385 70969 : FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
386 70969 : if (!heap_->CanExpandOldGeneration(
387 70969 : static_cast<size_t>(tasks * Page::kPageSize))) {
388 : // Optimize for memory usage near the heap limit.
389 : tasks = 1;
390 : }
391 70969 : return tasks;
392 : }
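// Editor's note (illustrative, not part of the original file): with
// FLAG_parallel_compaction enabled, 12 candidate pages and
// NumberOfAvailableCores() == 8, the function above yields min(8, 12) = 8
// compaction tasks; if the old generation cannot expand by
// 8 * Page::kPageSize, the count drops back to a single task to save memory
// near the heap limit.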
393 :
394 166509 : int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
395 : int slots) {
396 : DCHECK_GT(pages, 0);
397 : // Limit the number of update tasks as task creation often dominates the
398 : // actual work that is being done.
399 : const int kMaxPointerUpdateTasks = 8;
400 : const int kSlotsPerTask = 600;
401 : const int wanted_tasks =
402 166509 : (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
403 : return FLAG_parallel_pointer_update
404 : ? Min(kMaxPointerUpdateTasks,
405 166331 : Min(NumberOfAvailableCores(), wanted_tasks))
406 333018 : : 1;
407 : }
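// Editor's note: a minimal sketch of the task-count formula above in standard
// C++, added for illustration only (it is not part of the original file and
// the names are hypothetical).
namespace example {
constexpr int Min(int a, int b) { return a < b ? a : b; }
constexpr int Max(int a, int b) { return a > b ? a : b; }
// Mirrors NumberOfParallelPointerUpdateTasks for the parallel case: roughly
// one task per 600 slots, at least one task, never more than the page count,
// the available cores, or the hard cap of eight tasks.
constexpr int PointerUpdateTasks(int pages, int slots, int cores) {
  return Min(8, Min(cores, slots >= 0 ? Max(1, Min(pages, slots / 600))
                                      : pages));
}
static_assert(PointerUpdateTasks(16, 3000, 8) == 5, "3000 / 600 = 5 tasks");
static_assert(PointerUpdateTasks(16, 300, 8) == 1, "clamped to one task");
static_assert(PointerUpdateTasks(16, -1, 8) == 8, "unknown slots: use pages");
}  // namespace example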
408 :
409 0 : int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
410 : int pages) {
411 : DCHECK_GT(pages, 0);
412 : // No cap needed because all pages we need to process are fully filled with
413 : // interesting objects.
414 83403 : return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
415 83492 : : 1;
416 : }
417 :
418 62883 : MarkCompactCollector::MarkCompactCollector(Heap* heap)
419 : : MarkCompactCollectorBase(heap),
420 : page_parallel_job_semaphore_(0),
421 : #ifdef DEBUG
422 : state_(IDLE),
423 : #endif
424 : was_marked_incrementally_(false),
425 : evacuation_(false),
426 : compacting_(false),
427 : black_allocation_(false),
428 : have_code_to_deoptimize_(false),
429 : marking_worklist_(heap),
430 125766 : sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
431 62883 : old_to_new_slots_ = -1;
432 62883 : }
433 :
434 251472 : MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
435 :
436 62883 : void MarkCompactCollector::SetUp() {
437 : DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
438 : DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
439 : DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
440 : DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
441 62883 : }
442 :
443 62866 : void MarkCompactCollector::TearDown() {
444 62866 : AbortCompaction();
445 62867 : AbortWeakObjects();
446 125736 : if (heap()->incremental_marking()->IsMarking()) {
447 536 : marking_worklist()->Clear();
448 : }
449 62868 : }
450 :
451 0 : void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
452 : DCHECK(!p->NeverEvacuate());
453 10384 : p->MarkEvacuationCandidate();
454 10384 : evacuation_candidates_.push_back(p);
455 0 : }
456 :
457 :
458 0 : static void TraceFragmentation(PagedSpace* space) {
459 0 : int number_of_pages = space->CountTotalPages();
460 0 : intptr_t reserved = (number_of_pages * space->AreaSize());
461 0 : intptr_t free = reserved - space->SizeOfObjects();
462 : PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
463 0 : static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
464 0 : }
465 :
466 84028 : bool MarkCompactCollector::StartCompaction() {
467 84028 : if (!compacting_) {
468 : DCHECK(evacuation_candidates_.empty());
469 :
470 168056 : CollectEvacuationCandidates(heap()->old_space());
471 :
472 84028 : if (FLAG_compact_code_space) {
473 84028 : CollectEvacuationCandidates(heap()->code_space());
474 0 : } else if (FLAG_trace_fragmentation) {
475 0 : TraceFragmentation(heap()->code_space());
476 : }
477 :
478 84028 : if (FLAG_trace_fragmentation) {
479 0 : TraceFragmentation(heap()->map_space());
480 : }
481 :
482 84028 : compacting_ = !evacuation_candidates_.empty();
483 : }
484 :
485 84028 : return compacting_;
486 : }
487 :
488 83492 : void MarkCompactCollector::CollectGarbage() {
489 : // Make sure that Prepare() has been called. The individual steps below will
490 : // update the state as they proceed.
491 : DCHECK(state_ == PREPARE_GC);
492 :
493 : #ifdef ENABLE_MINOR_MC
494 83492 : heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
495 : #endif // ENABLE_MINOR_MC
496 :
497 83492 : MarkLiveObjects();
498 83492 : ClearNonLiveReferences();
499 83492 : VerifyMarking();
500 :
501 83492 : RecordObjectStats();
502 :
503 83492 : StartSweepSpaces();
504 :
505 83492 : Evacuate();
506 :
507 83492 : Finish();
508 83492 : }
509 :
510 : #ifdef VERIFY_HEAP
511 : void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
512 : HeapObjectIterator iterator(space);
513 : for (HeapObject object = iterator.Next(); !object.is_null();
514 : object = iterator.Next()) {
515 : CHECK(non_atomic_marking_state()->IsBlack(object));
516 : }
517 : }
518 :
519 : void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
520 : for (Page* p : *space) {
521 : CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
522 : CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
523 : }
524 : }
525 :
526 :
527 : void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
528 : for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
529 : CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
530 : CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
531 : }
532 : }
533 :
534 : void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
535 : LargeObjectIterator it(space);
536 : for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
537 : CHECK(non_atomic_marking_state()->IsWhite(obj));
538 : CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
539 : MemoryChunk::FromHeapObject(obj)));
540 : }
541 : }
542 :
543 : void MarkCompactCollector::VerifyMarkbitsAreClean() {
544 : VerifyMarkbitsAreClean(heap_->old_space());
545 : VerifyMarkbitsAreClean(heap_->code_space());
546 : VerifyMarkbitsAreClean(heap_->map_space());
547 : VerifyMarkbitsAreClean(heap_->new_space());
548 : // Read-only space should always be black since we never collect any objects
549 : // in it or linked from it.
550 : VerifyMarkbitsAreDirty(heap_->read_only_space());
551 : VerifyMarkbitsAreClean(heap_->lo_space());
552 : VerifyMarkbitsAreClean(heap_->code_lo_space());
553 : VerifyMarkbitsAreClean(heap_->new_lo_space());
554 : }
555 :
556 : #endif // VERIFY_HEAP
557 :
558 188429 : void MarkCompactCollector::EnsureSweepingCompleted() {
559 376858 : if (!sweeper()->sweeping_in_progress()) return;
560 :
561 83492 : sweeper()->EnsureCompleted();
562 166984 : heap()->old_space()->RefillFreeList();
563 83492 : heap()->code_space()->RefillFreeList();
564 83492 : heap()->map_space()->RefillFreeList();
565 :
566 : #ifdef VERIFY_HEAP
567 : if (FLAG_verify_heap && !evacuation()) {
568 : FullEvacuationVerifier verifier(heap());
569 : verifier.Run();
570 : }
571 : #endif
572 : }
573 :
574 165370 : void MarkCompactCollector::ComputeEvacuationHeuristics(
575 : size_t area_size, int* target_fragmentation_percent,
576 : size_t* max_evacuated_bytes) {
577              : // For the memory-reducing and optimize-for-memory modes we directly define
578              : // both constants.
579 : const int kTargetFragmentationPercentForReduceMemory = 20;
580 : const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
581 : const int kTargetFragmentationPercentForOptimizeMemory = 20;
582 : const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
583 :
584 : // For regular mode (which is latency critical) we define less aggressive
585 : // defaults to start and switch to a trace-based (using compaction speed)
586 : // approach as soon as we have enough samples.
587 : const int kTargetFragmentationPercent = 70;
588 : const size_t kMaxEvacuatedBytes = 4 * MB;
589 : // Time to take for a single area (=payload of page). Used as soon as there
590              : // Target time for compacting a single area (= payload of a page). Used as
591              : // soon as there exist enough compaction speed samples.
592 :
593 468960 : if (heap()->ShouldReduceMemory()) {
594 27128 : *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
595 27128 : *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
596 138242 : } else if (heap()->ShouldOptimizeForMemoryUsage()) {
597 : *target_fragmentation_percent =
598 22 : kTargetFragmentationPercentForOptimizeMemory;
599 22 : *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
600 : } else {
601 : const double estimated_compaction_speed =
602 138220 : heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
603 138220 : if (estimated_compaction_speed != 0) {
604 : // Estimate the target fragmentation based on traced compaction speed
605 : // and a goal for a single page.
606 : const double estimated_ms_per_area =
607 107412 : 1 + area_size / estimated_compaction_speed;
608 : *target_fragmentation_percent = static_cast<int>(
609 107412 : 100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
610 107412 : if (*target_fragmentation_percent <
611 : kTargetFragmentationPercentForReduceMemory) {
612 : *target_fragmentation_percent =
613 0 : kTargetFragmentationPercentForReduceMemory;
614 : }
615 : } else {
616 30808 : *target_fragmentation_percent = kTargetFragmentationPercent;
617 : }
618 138220 : *max_evacuated_bytes = kMaxEvacuatedBytes;
619 : }
620 165370 : }
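// Editor's note: a worked example of the trace-based branch above, added for
// illustration only (not part of the original file; the helper name and the
// numbers are hypothetical).
namespace example {
// With an area of 500 KB and a traced compaction speed of 1000 KB/ms the
// estimated time per area is 1 + 500 / 1000 = 1.5 ms, so the target becomes
// 100 - 100 * 0.5 / 1.5 = 66 percent, i.e. a page needs roughly two thirds of
// its area free before it is considered for evacuation.
constexpr int TargetFragmentationPercent(double area_size,
                                         double compaction_speed,
                                         double target_ms_per_area) {
  return static_cast<int>(100 - 100 * target_ms_per_area /
                                    (1 + area_size / compaction_speed));
}
static_assert(TargetFragmentationPercent(500.0, 1000.0, 0.5) == 66,
              "1.5 ms per area -> 66% target fragmentation");
}  // namespace example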
621 :
622 336112 : void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
623 : DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
624 :
625 168056 : int number_of_pages = space->CountTotalPages();
626 168056 : size_t area_size = space->AreaSize();
627 :
628 : // Pairs of (live_bytes_in_page, page).
629 : typedef std::pair<size_t, Page*> LiveBytesPagePair;
630 : std::vector<LiveBytesPagePair> pages;
631 168056 : pages.reserve(number_of_pages);
632 :
633 : DCHECK(!sweeping_in_progress());
634 : Page* owner_of_linear_allocation_area =
635 : space->top() == space->limit()
636 : ? nullptr
637 168056 : : Page::FromAllocationAreaAddress(space->top());
638 516691 : for (Page* p : *space) {
639 604283 : if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
640 : !p->CanAllocate())
641 : continue;
642 : // Invariant: Evacuation candidates are just created when marking is
643 : // started. This means that sweeping has finished. Furthermore, at the end
644 : // of a GC all evacuation candidates are cleared and their slot buffers are
645 : // released.
646 127824 : CHECK(!p->IsEvacuationCandidate());
647 127824 : CHECK_NULL(p->slot_set<OLD_TO_OLD>());
648 127824 : CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
649 127824 : CHECK(p->SweepingDone());
650 : DCHECK(p->area_size() == area_size);
651 255648 : pages.push_back(std::make_pair(p->allocated_bytes(), p));
652 : }
653 :
654 : int candidate_count = 0;
655 : size_t total_live_bytes = 0;
656 :
657 168056 : const bool reduce_memory = heap()->ShouldReduceMemory();
658 168056 : if (FLAG_manual_evacuation_candidates_selection) {
659 1080 : for (size_t i = 0; i < pages.size(); i++) {
660 334 : Page* p = pages[i].second;
661 334 : if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
662 145 : candidate_count++;
663 145 : total_live_bytes += pages[i].first;
664 : p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
665 : AddEvacuationCandidate(p);
666 : }
667 : }
668 167644 : } else if (FLAG_stress_compaction_random) {
669 0 : double fraction = isolate()->fuzzer_rng()->NextDouble();
670 : size_t pages_to_mark_count =
671 0 : static_cast<size_t>(fraction * (pages.size() + 1));
672 0 : for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
673 0 : pages.size(), pages_to_mark_count)) {
674 0 : candidate_count++;
675 0 : total_live_bytes += pages[i].first;
676 0 : AddEvacuationCandidate(pages[i].second);
677 : }
678 167644 : } else if (FLAG_stress_compaction) {
679 11962 : for (size_t i = 0; i < pages.size(); i++) {
680 4844 : Page* p = pages[i].second;
681 4844 : if (i % 2 == 0) {
682 2982 : candidate_count++;
683 2982 : total_live_bytes += pages[i].first;
684 : AddEvacuationCandidate(p);
685 : }
686 : }
687 : } else {
688 : // The following approach determines the pages that should be evacuated.
689 : //
690 : // We use two conditions to decide whether a page qualifies as an evacuation
691 : // candidate, or not:
692              : // * Target fragmentation: How fragmented is a page, i.e., the ratio
693              : // between live bytes and the capacity of this page (= area).
694              : // * Evacuation quota: A global quota determining how many bytes should be
695              : // compacted.
696 : //
697 : // The algorithm sorts all pages by live bytes and then iterates through
698 : // them starting with the page with the most free memory, adding them to the
699 : // set of evacuation candidates as long as both conditions (fragmentation
700 : // and quota) hold.
701 : size_t max_evacuated_bytes;
702 : int target_fragmentation_percent;
703 : ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
704 165370 : &max_evacuated_bytes);
705 :
706 : const size_t free_bytes_threshold =
707 165370 : target_fragmentation_percent * (area_size / 100);
708 :
709 : // Sort pages from the most free to the least free, then select
710 : // the first n pages for evacuation such that:
711 : // - the total size of evacuated objects does not exceed the specified
712 : // limit.
713 : // - fragmentation of (n+1)-th page does not exceed the specified limit.
714 : std::sort(pages.begin(), pages.end(),
715 : [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
716 : return a.first < b.first;
717 165370 : });
718 576032 : for (size_t i = 0; i < pages.size(); i++) {
719 122646 : size_t live_bytes = pages[i].first;
720 : DCHECK_GE(area_size, live_bytes);
721 122646 : size_t free_bytes = area_size - live_bytes;
722 122646 : if (FLAG_always_compact ||
723 43842 : ((free_bytes >= free_bytes_threshold) &&
724 43842 : ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
725 43889 : candidate_count++;
726 43889 : total_live_bytes += live_bytes;
727 : }
728 122646 : if (FLAG_trace_fragmentation_verbose) {
729 : PrintIsolate(isolate(),
730 : "compaction-selection-page: space=%s free_bytes_page=%zu "
731 : "fragmentation_limit_kb=%" PRIuS
732 : " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
733 : "compaction_limit_kb=%zu\n",
734 : space->name(), free_bytes / KB, free_bytes_threshold / KB,
735 : target_fragmentation_percent, total_live_bytes / KB,
736 0 : max_evacuated_bytes / KB);
737 : }
738 : }
739              : // How many pages we will allocate for the evacuated objects
740 : // in the worst case: ceil(total_live_bytes / area_size)
741 : int estimated_new_pages =
742 165370 : static_cast<int>((total_live_bytes + area_size - 1) / area_size);
743 : DCHECK_LE(estimated_new_pages, candidate_count);
744 : int estimated_released_pages = candidate_count - estimated_new_pages;
745 : // Avoid (compact -> expand) cycles.
746 165370 : if ((estimated_released_pages == 0) && !FLAG_always_compact) {
747 : candidate_count = 0;
748 : }
749 172627 : for (int i = 0; i < candidate_count; i++) {
750 14514 : AddEvacuationCandidate(pages[i].second);
751 : }
752 : }
753 :
754 168056 : if (FLAG_trace_fragmentation) {
755 : PrintIsolate(isolate(),
756 : "compaction-selection: space=%s reduce_memory=%d pages=%d "
757 : "total_live_bytes=%zu\n",
758 : space->name(), reduce_memory, candidate_count,
759 0 : total_live_bytes / KB);
760 : }
761 168056 : }
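// Editor's note: the quota-based selection above, reduced to a standalone
// sketch in standard C++ for illustration. It ignores FLAG_always_compact,
// the stress/manual modes and the final compact->expand check; all names
// below (SimplePage, SelectCandidates) are hypothetical, not V8 API.
#include <algorithm>
#include <cstddef>
#include <vector>

namespace example {
struct SimplePage {
  size_t live_bytes;  // Bytes still live on the page.
  int id;             // Stand-in for Page*.
};

// Sorts pages by live bytes (most free memory first) and keeps taking pages
// while each one is fragmented enough and the running total stays within the
// global evacuation quota.
std::vector<int> SelectCandidates(std::vector<SimplePage> pages,
                                  size_t area_size,
                                  int target_fragmentation_percent,
                                  size_t max_evacuated_bytes) {
  std::sort(pages.begin(), pages.end(),
            [](const SimplePage& a, const SimplePage& b) {
              return a.live_bytes < b.live_bytes;
            });
  const size_t free_bytes_threshold =
      target_fragmentation_percent * (area_size / 100);
  std::vector<int> candidates;
  size_t total_live_bytes = 0;
  for (const SimplePage& p : pages) {
    const size_t free_bytes = area_size - p.live_bytes;
    if (free_bytes >= free_bytes_threshold &&
        total_live_bytes + p.live_bytes <= max_evacuated_bytes) {
      total_live_bytes += p.live_bytes;
      candidates.push_back(p.id);
    }
  }
  return candidates;
}
}  // namespace example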
762 :
763 :
764 62866 : void MarkCompactCollector::AbortCompaction() {
765 62866 : if (compacting_) {
766 1 : RememberedSet<OLD_TO_OLD>::ClearAll(heap());
767 4 : for (Page* p : evacuation_candidates_) {
768 : p->ClearEvacuationCandidate();
769 : }
770 1 : compacting_ = false;
771 : evacuation_candidates_.clear();
772 : }
773 : DCHECK(evacuation_candidates_.empty());
774 62866 : }
775 :
776 :
777 83492 : void MarkCompactCollector::Prepare() {
778 557372 : was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
779 :
780 : #ifdef DEBUG
781 : DCHECK(state_ == IDLE);
782 : state_ = PREPARE_GC;
783 : #endif
784 :
785 : DCHECK(!FLAG_never_compact || !FLAG_always_compact);
786 :
787 : // Instead of waiting we could also abort the sweeper threads here.
788 83492 : EnsureSweepingCompleted();
789 :
790 166984 : if (heap()->incremental_marking()->IsSweeping()) {
791 4021 : heap()->incremental_marking()->Stop();
792 : }
793 :
794 83492 : heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
795 :
796 83492 : if (!was_marked_incrementally_) {
797 225680 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
798 112840 : heap_->local_embedder_heap_tracer()->TracePrologue();
799 : }
800 :
801              : // Don't start compaction if we are in the middle of an incremental
802 : // marking cycle. We did not collect any slots.
803 83492 : if (!FLAG_never_compact && !was_marked_incrementally_) {
804 56420 : StartCompaction();
805 : }
806 :
807 : PagedSpaces spaces(heap());
808 333968 : for (PagedSpace* space = spaces.next(); space != nullptr;
809 : space = spaces.next()) {
810 250476 : space->PrepareForMarkCompact();
811 : }
812 : heap()->account_external_memory_concurrently_freed();
813 :
814 : #ifdef VERIFY_HEAP
815 : if (!was_marked_incrementally_ && FLAG_verify_heap) {
816 : VerifyMarkbitsAreClean();
817 : }
818 : #endif
819 83492 : }
820 :
821 250523 : void MarkCompactCollector::FinishConcurrentMarking(
822 : ConcurrentMarking::StopRequest stop_request) {
823              : // FinishConcurrentMarking is called for both concurrent and parallel
824              : // marking. It is safe to call this function when tasks are already finished.
825 250523 : if (FLAG_parallel_marking || FLAG_concurrent_marking) {
826 496222 : heap()->concurrent_marking()->Stop(stop_request);
827 : heap()->concurrent_marking()->FlushMemoryChunkData(
828 496222 : non_atomic_marking_state());
829 : }
830 250523 : }
831 :
832 83492 : void MarkCompactCollector::VerifyMarking() {
833 83492 : CHECK(marking_worklist()->IsEmpty());
834 : DCHECK(heap_->incremental_marking()->IsStopped());
835 : #ifdef VERIFY_HEAP
836 : if (FLAG_verify_heap) {
837 : FullMarkingVerifier verifier(heap());
838 : verifier.Run();
839 : }
840 : #endif
841 : #ifdef VERIFY_HEAP
842 : if (FLAG_verify_heap) {
843 : heap()->old_space()->VerifyLiveBytes();
844 : heap()->map_space()->VerifyLiveBytes();
845 : heap()->code_space()->VerifyLiveBytes();
846 : }
847 : #endif
848 83492 : }
849 :
850 250476 : void MarkCompactCollector::Finish() {
851 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
852 :
853 83492 : epoch_++;
854 :
855 : #ifdef DEBUG
856 : heap()->VerifyCountersBeforeConcurrentSweeping();
857 : #endif
858 :
859 83492 : CHECK(weak_objects_.current_ephemerons.IsEmpty());
860 83492 : CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
861 83492 : weak_objects_.next_ephemerons.Clear();
862 :
863 83492 : sweeper()->StartSweeperTasks();
864 83492 : sweeper()->StartIterabilityTasks();
865 :
866 : // Clear the marking state of live large objects.
867 166984 : heap_->lo_space()->ClearMarkingStateOfLiveObjects();
868 166984 : heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
869 :
870 : #ifdef DEBUG
871 : DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
872 : state_ = IDLE;
873 : #endif
874 166984 : heap_->isolate()->inner_pointer_to_code_cache()->Flush();
875 :
876 : // The stub caches are not traversed during GC; clear them to force
877 : // their lazy re-initialization. This must be done after the
878 : // GC, because it relies on the new address of certain old space
879 : // objects (empty string, illegal builtin).
880 83492 : isolate()->load_stub_cache()->Clear();
881 83492 : isolate()->store_stub_cache()->Clear();
882 :
883 83492 : if (have_code_to_deoptimize_) {
884 : // Some code objects were marked for deoptimization during the GC.
885 52 : Deoptimizer::DeoptimizeMarkedCode(isolate());
886 52 : have_code_to_deoptimize_ = false;
887 83492 : }
888 83492 : }
889 :
890 83492 : class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
891 : public:
892 : explicit RootMarkingVisitor(MarkCompactCollector* collector)
893 83492 : : collector_(collector) {}
894 :
895 254061894 : void VisitRootPointer(Root root, const char* description,
896 : FullObjectSlot p) final {
897 : MarkObjectByPointer(root, p);
898 254061894 : }
899 :
900 1917881 : void VisitRootPointers(Root root, const char* description,
901 : FullObjectSlot start, FullObjectSlot end) final {
902 40265064 : for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
903 1917881 : }
904 :
905 : private:
906 : V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
907 580982392 : if (!(*p)->IsHeapObject()) return;
908 :
909 572970688 : collector_->MarkRootObject(root, HeapObject::cast(*p));
910 : }
911 :
912 : MarkCompactCollector* const collector_;
913 : };
914 :
915 : // This visitor is used to visit the body of special objects held alive by
916 : // other roots.
917 : //
918 : // It is currently used for
919              : // - Code held alive by the top optimized frame. This code cannot be deoptimized
920              : // and thus has to be kept alive in an isolated way, i.e., it should not keep
921              : // alive other code objects reachable through the weak list, but it should
922              : // keep alive its embedded pointers (which would otherwise be dropped).
923 : // - Prefix of the string table.
924 83492 : class MarkCompactCollector::CustomRootBodyMarkingVisitor final
925 : : public ObjectVisitor {
926 : public:
927 : explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
928 83492 : : collector_(collector) {}
929 :
930 0 : void VisitPointer(HeapObject host, ObjectSlot p) final {
931 : MarkObject(host, *p);
932 0 : }
933 :
934 84615 : void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
935 590806 : for (ObjectSlot p = start; p < end; ++p) {
936 : DCHECK(!HasWeakHeapObjectTag(*p));
937 : MarkObject(host, *p);
938 : }
939 84615 : }
940 :
941 0 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
942 : MaybeObjectSlot end) final {
943 : // At the moment, custom roots cannot contain weak pointers.
944 0 : UNREACHABLE();
945 : }
946 :
947 : // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
948 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
949 0 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
950 : MarkObject(host, target);
951 0 : }
952 7730 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
953 : MarkObject(host, rinfo->target_object());
954 7730 : }
955 :
956 : private:
957 : V8_INLINE void MarkObject(HeapObject host, Object object) {
958 858612 : if (!object->IsHeapObject()) return;
959 96842 : collector_->MarkObject(host, HeapObject::cast(object));
960 : }
961 :
962 : MarkCompactCollector* const collector_;
963 : };
964 :
965 83492 : class InternalizedStringTableCleaner : public ObjectVisitor {
966 : public:
967 : InternalizedStringTableCleaner(Heap* heap, HeapObject table)
968 83492 : : heap_(heap), pointers_removed_(0), table_(table) {}
969 :
970 83492 : void VisitPointers(HeapObject host, ObjectSlot start,
971 : ObjectSlot end) override {
972 : // Visit all HeapObject pointers in [start, end).
973 83492 : Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
974 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
975 : heap_->mark_compact_collector()->non_atomic_marking_state();
976 219483208 : for (ObjectSlot p = start; p < end; ++p) {
977 219316224 : Object o = *p;
978 219316224 : if (o->IsHeapObject()) {
979 : HeapObject heap_object = HeapObject::cast(o);
980 219316224 : if (marking_state->IsWhite(heap_object)) {
981 4504215 : pointers_removed_++;
982 : // Set the entry to the_hole_value (as deleted).
983 : p.store(the_hole);
984 : } else {
985 : // StringTable contains only old space strings.
986 : DCHECK(!Heap::InNewSpace(o));
987 : MarkCompactCollector::RecordSlot(table_, p, heap_object);
988 : }
989 : }
990 : }
991 83492 : }
992 :
993 0 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
994 : MaybeObjectSlot end) final {
995 0 : UNREACHABLE();
996 : }
997 :
998 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
999 :
1000 0 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
1001 0 : UNREACHABLE();
1002 : }
1003 :
1004 : int PointersRemoved() {
1005 : return pointers_removed_;
1006 : }
1007 :
1008 : private:
1009 : Heap* heap_;
1010 : int pointers_removed_;
1011 : HeapObject table_;
1012 : };
1013 :
1014 83492 : class ExternalStringTableCleaner : public RootVisitor {
1015 : public:
1016 83492 : explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
1017 :
1018 83472 : void VisitRootPointers(Root root, const char* description,
1019 : FullObjectSlot start, FullObjectSlot end) override {
1020 : // Visit all HeapObject pointers in [start, end).
1021 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
1022 : heap_->mark_compact_collector()->non_atomic_marking_state();
1023 83472 : Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
1024 288046 : for (FullObjectSlot p = start; p < end; ++p) {
1025 121102 : Object o = *p;
1026 121102 : if (o->IsHeapObject()) {
1027 : HeapObject heap_object = HeapObject::cast(o);
1028 121102 : if (marking_state->IsWhite(heap_object)) {
1029 1615 : if (o->IsExternalString()) {
1030 1615 : heap_->FinalizeExternalString(String::cast(o));
1031 : } else {
1032 : // The original external string may have been internalized.
1033 : DCHECK(o->IsThinString());
1034 : }
1035 : // Set the entry to the_hole_value (as deleted).
1036 : p.store(the_hole);
1037 : }
1038 : }
1039 : }
1040 83472 : }
1041 :
1042 : private:
1043 : Heap* heap_;
1044 : };
1045 :
1046 : // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1047 : // are retained.
1048 83492 : class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1049 : public:
1050 : explicit MarkCompactWeakObjectRetainer(
1051 : MarkCompactCollector::NonAtomicMarkingState* marking_state)
1052 83492 : : marking_state_(marking_state) {}
1053 :
1054 4455032 : Object RetainAs(Object object) override {
1055 : HeapObject heap_object = HeapObject::cast(object);
1056 : DCHECK(!marking_state_->IsGrey(heap_object));
1057 4455032 : if (marking_state_->IsBlack(heap_object)) {
1058 4235483 : return object;
1059 365758 : } else if (object->IsAllocationSite() &&
1060 : !(AllocationSite::cast(object)->IsZombie())) {
1061 : // "dead" AllocationSites need to live long enough for a traversal of new
1062 : // space. These sites get a one-time reprieve.
1063 :
1064 76870 : Object nested = object;
1065 233923 : while (nested->IsAllocationSite()) {
1066 80183 : AllocationSite current_site = AllocationSite::cast(nested);
1067              : // MarkZombie will override the nested_site, so read it first before
1068              : // marking.
1069 80183 : nested = current_site->nested_site();
1070 80183 : current_site->MarkZombie();
1071 : marking_state_->WhiteToBlack(current_site);
1072 : }
1073 :
1074 76870 : return object;
1075 : } else {
1076 142679 : return Object();
1077 : }
1078 : }
1079 :
1080 : private:
1081 : MarkCompactCollector::NonAtomicMarkingState* marking_state_;
1082 : };
1083 :
1084 70969 : class RecordMigratedSlotVisitor : public ObjectVisitor {
1085 : public:
1086 : explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1087 73559 : : collector_(collector) {}
1088 :
1089 0 : inline void VisitPointer(HeapObject host, ObjectSlot p) final {
1090 : DCHECK(!HasWeakHeapObjectTag(*p));
1091 225135474 : RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
1092 0 : }
1093 :
1094 0 : inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
1095 79078420 : RecordMigratedSlot(host, *p, p.address());
1096 0 : }
1097 :
1098 598371 : inline void VisitPointers(HeapObject host, ObjectSlot start,
1099 : ObjectSlot end) final {
1100 126061165 : while (start < end) {
1101 : VisitPointer(host, start);
1102 : ++start;
1103 : }
1104 598340 : }
1105 :
1106 0 : inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
1107 : MaybeObjectSlot end) final {
1108 41171404 : while (start < end) {
1109 : VisitPointer(host, start);
1110 : ++start;
1111 : }
1112 0 : }
1113 :
1114 290 : inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1115 : DCHECK_EQ(host, rinfo->host());
1116 : DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
1117 290 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1118              : // The target is always in old space, so we don't have to record the slot in
1119 : // the old-to-new remembered set.
1120 : DCHECK(!Heap::InNewSpace(target));
1121 290 : collector_->RecordRelocSlot(host, rinfo, target);
1122 290 : }
1123 :
1124 46194 : inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1125 : DCHECK_EQ(host, rinfo->host());
1126 : DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1127 46194 : HeapObject object = HeapObject::cast(rinfo->target_object());
1128 46194 : GenerationalBarrierForCode(host, rinfo, object);
1129 46194 : collector_->RecordRelocSlot(host, rinfo, object);
1130 46194 : }
1131 :
1132 : // Entries that are skipped for recording.
1133 0 : inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
1134 0 : inline void VisitExternalReference(Foreign host, Address* p) final {}
1135 26641 : inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
1136 0 : inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
1137 :
1138 : protected:
1139 151760820 : inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
1140 : Address slot) {
1141 151760820 : if (value->IsStrongOrWeak()) {
1142 : Page* p = Page::FromAddress(value.ptr());
1143 234655822 : if (p->InNewSpace()) {
1144 : DCHECK_IMPLIES(p->InToSpace(),
1145 : p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
1146 : RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
1147 2946459 : MemoryChunk::FromHeapObject(host), slot);
1148 114381452 : } else if (p->IsEvacuationCandidate()) {
1149 : RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1150 13204658 : MemoryChunk::FromHeapObject(host), slot);
1151 : }
1152 : }
1153 151760671 : }
1154 :
1155 : MarkCompactCollector* collector_;
1156 : };
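// Editor's note: an illustrative reduction of the remembered-set choice made
// by RecordMigratedSlot above (not part of the original file; the enum and
// function names are hypothetical). For a strong or weak heap reference, the
// slot of the migrated host is remembered in OLD_TO_NEW when the value lives
// in new space, in OLD_TO_OLD when the value sits on an evacuation candidate,
// and not at all otherwise.
namespace example {
enum class RememberedSetKind { kNone, kOldToNew, kOldToOld };
constexpr RememberedSetKind Classify(bool value_in_new_space,
                                     bool value_on_evacuation_candidate) {
  return value_in_new_space
             ? RememberedSetKind::kOldToNew
             : (value_on_evacuation_candidate ? RememberedSetKind::kOldToOld
                                              : RememberedSetKind::kNone);
}
static_assert(Classify(true, false) == RememberedSetKind::kOldToNew,
              "values in new space need an old-to-new entry");
static_assert(Classify(false, true) == RememberedSetKind::kOldToOld,
              "values on evacuation candidates need an old-to-old entry");
static_assert(Classify(false, false) == RememberedSetKind::kNone,
              "other values need no remembered-set entry");
}  // namespace example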
1157 :
1158 : class MigrationObserver {
1159 : public:
1160 70969 : explicit MigrationObserver(Heap* heap) : heap_(heap) {}
1161 :
1162 0 : virtual ~MigrationObserver() = default;
1163 : virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1164 : int size) = 0;
1165 :
1166 : protected:
1167 : Heap* heap_;
1168 : };
1169 :
1170 0 : class ProfilingMigrationObserver final : public MigrationObserver {
1171 : public:
1172 70969 : explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
1173 :
1174 502466 : inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1175 : int size) final {
1176 690658 : if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
1177 416 : PROFILE(heap_->isolate(),
1178 : CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
1179 : }
1180 502364 : heap_->OnMoveEvent(dst, src, size);
1181 501812 : }
1182 : };
1183 :
1184 342245 : class HeapObjectVisitor {
1185 : public:
1186 342245 : virtual ~HeapObjectVisitor() = default;
1187 : virtual bool Visit(HeapObject object, int size) = 0;
1188 : };
1189 :
1190 171100 : class EvacuateVisitorBase : public HeapObjectVisitor {
1191 : public:
1192 : void AddObserver(MigrationObserver* observer) {
1193 1616 : migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
1194 1616 : observers_.push_back(observer);
1195 : }
1196 :
1197 : protected:
1198 : enum MigrationMode { kFast, kObserved };
1199 :
1200 : typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject dst,
1201 : HeapObject src, int size,
1202 : AllocationSpace dest);
1203 :
1204 : template <MigrationMode mode>
1205 68710260 : static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
1206 : HeapObject src, int size, AllocationSpace dest) {
1207 : Address dst_addr = dst->address();
1208 : Address src_addr = src->address();
1209 : DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
1210 : DCHECK_NE(dest, LO_SPACE);
1211 : DCHECK_NE(dest, CODE_LO_SPACE);
1212 68710260 : if (dest == OLD_SPACE) {
1213 : DCHECK_OBJECT_SIZE(size);
1214 : DCHECK(IsAligned(size, kTaggedSize));
1215 : base->heap_->CopyBlock(dst_addr, src_addr, size);
1216 : if (mode != MigrationMode::kFast)
1217 : base->ExecuteMigrationObservers(dest, src, dst, size);
1218 44168726 : dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
1219 24758893 : } else if (dest == CODE_SPACE) {
1220 : DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
1221 : base->heap_->CopyBlock(dst_addr, src_addr, size);
1222 3166 : Code::cast(dst)->Relocate(dst_addr - src_addr);
1223 : if (mode != MigrationMode::kFast)
1224 : base->ExecuteMigrationObservers(dest, src, dst, size);
1225 1583 : dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
1226 : } else {
1227 : DCHECK_OBJECT_SIZE(size);
1228 : DCHECK(dest == NEW_SPACE);
1229 : base->heap_->CopyBlock(dst_addr, src_addr, size);
1230 : if (mode != MigrationMode::kFast)
1231 : base->ExecuteMigrationObservers(dest, src, dst, size);
1232 : }
1233 : src->set_map_word(MapWord::FromForwardingAddress(dst));
1234 69232394 : }
1235 :
1236 : EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
1237 : RecordMigratedSlotVisitor* record_visitor)
1238 : : heap_(heap),
1239 : local_allocator_(local_allocator),
1240 171100 : record_visitor_(record_visitor) {
1241 171100 : migration_function_ = RawMigrateObject<MigrationMode::kFast>;
1242 : }
1243 :
1244 44367518 : inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
1245 : int size, HeapObject* target_object) {
1246 : #ifdef VERIFY_HEAP
1247 : if (AbortCompactionForTesting(object)) return false;
1248 : #endif // VERIFY_HEAP
1249 : AllocationAlignment alignment =
1250 : HeapObject::RequiredAlignment(object->map());
1251 : AllocationResult allocation =
1252 44367518 : local_allocator_->Allocate(target_space, size, alignment);
1253 43965226 : if (allocation.To(target_object)) {
1254 : MigrateObject(*target_object, object, size, target_space);
1255 44494439 : return true;
1256 : }
1257 : return false;
1258 : }
1259 :
1260 : inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
1261 : HeapObject dst, int size) {
1262 1507602 : for (MigrationObserver* obs : observers_) {
1263 502595 : obs->Move(dest, src, dst, size);
1264 : }
1265 : }
1266 :
1267 : inline void MigrateObject(HeapObject dst, HeapObject src, int size,
1268 : AllocationSpace dest) {
1269 67581753 : migration_function_(this, dst, src, size, dest);
1270 : }
1271 :
1272 : #ifdef VERIFY_HEAP
1273 : bool AbortCompactionForTesting(HeapObject object) {
1274 : if (FLAG_stress_compaction) {
1275 : const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1276 : kPageAlignmentMask & ~kObjectAlignmentMask;
1277 : if ((object->ptr() & kPageAlignmentMask) == mask) {
1278 : Page* page = Page::FromHeapObject(object);
1279 : if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1280 : page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1281 : } else {
1282 : page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1283 : return true;
1284 : }
1285 : }
1286 : }
1287 : return false;
1288 : }
1289 : #endif // VERIFY_HEAP
1290 :
1291 : Heap* heap_;
1292 : LocalAllocator* local_allocator_;
1293 : RecordMigratedSlotVisitor* record_visitor_;
1294 : std::vector<MigrationObserver*> observers_;
1295 : MigrateFunction migration_function_;
1296 : };
1297 :
1298 171100 : class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
1299 : public:
1300 85550 : explicit EvacuateNewSpaceVisitor(
1301 85550 : Heap* heap, LocalAllocator* local_allocator,
1302 : RecordMigratedSlotVisitor* record_visitor,
1303 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1304 : : EvacuateVisitorBase(heap, local_allocator, record_visitor),
1305 : buffer_(LocalAllocationBuffer::InvalidBuffer()),
1306 : promoted_size_(0),
1307 : semispace_copied_size_(0),
1308 : local_pretenuring_feedback_(local_pretenuring_feedback),
1309 256650 : is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
1310 :
1311 37311327 : inline bool Visit(HeapObject object, int size) override {
1312 37311327 : if (TryEvacuateWithoutCopy(object)) return true;
1313 35923713 : HeapObject target_object;
1314 47208196 : if (heap_->ShouldBePromoted(object->address()) &&
1315 11284212 : TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1316 11284498 : promoted_size_ += size;
1317 11284498 : return true;
1318 : }
1319 : heap_->UpdateAllocationSite(object->map(), object,
1320 49636878 : local_pretenuring_feedback_);
1321 24809622 : HeapObject target;
1322 24809622 : AllocationSpace space = AllocateTargetObject(object, size, &target);
1323 24753682 : MigrateObject(HeapObject::cast(target), object, size, space);
1324 24803049 : semispace_copied_size_ += size;
1325 24803049 : return true;
1326 : }
1327 :
1328 : intptr_t promoted_size() { return promoted_size_; }
1329 : intptr_t semispace_copied_size() { return semispace_copied_size_; }
1330 :
1331 : private:
1332 37311596 : inline bool TryEvacuateWithoutCopy(HeapObject object) {
1333 37311596 : if (is_incremental_marking_) return false;
1334 :
1335 : Map map = object->map();
1336 :
1337 : // Some objects can be evacuated without creating a copy.
1338 37312959 : if (map->visitor_id() == kVisitThinString) {
1339 : HeapObject actual = ThinString::cast(object)->unchecked_actual();
1340 1416931 : if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
1341 : object->map_slot().Relaxed_Store(
1342 : MapWord::FromForwardingAddress(actual).ToMap());
1343 1413426 : return true;
1344 : }
1345 : // TODO(mlippautz): Handle ConsString.
1346 :
1347 : return false;
1348 : }
1349 :
1350 24808335 : inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
1351 20 : HeapObject* target_object) {
1352 : AllocationAlignment alignment =
1353 : HeapObject::RequiredAlignment(old_object->map());
1354 : AllocationSpace space_allocated_in = NEW_SPACE;
1355 : AllocationResult allocation =
1356 24808335 : local_allocator_->Allocate(NEW_SPACE, size, alignment);
1357 24765783 : if (allocation.IsRetry()) {
1358 20 : allocation = AllocateInOldSpace(size, alignment);
1359 : space_allocated_in = OLD_SPACE;
1360 : }
1361 24765783 : bool ok = allocation.To(target_object);
1362 : DCHECK(ok);
1363 : USE(ok);
1364 24761916 : return space_allocated_in;
1365 : }
1366 :
1367 20 : inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1368 : AllocationAlignment alignment) {
1369 : AllocationResult allocation =
1370 20 : local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
1371 20 : if (allocation.IsRetry()) {
1372 : heap_->FatalProcessOutOfMemory(
1373 0 : "MarkCompactCollector: semi-space copy, fallback in old gen");
1374 : }
1375 20 : return allocation;
1376 : }
1377 :
1378 : LocalAllocationBuffer buffer_;
1379 : intptr_t promoted_size_;
1380 : intptr_t semispace_copied_size_;
1381 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1382 : bool is_incremental_marking_;
1383 : };
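// Editor's note (descriptive addition, not part of the original file): the
// visitor above tries three strategies in order: (1) forward a ThinString to
// its actual string without copying, (2) promote the object to OLD_SPACE when
// Heap::ShouldBePromoted says so and the old-space allocation succeeds, and
// (3) otherwise copy it within NEW_SPACE, falling back to OLD_SPACE only if
// the new-space allocation has to retry.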
1384 :
1385 : template <PageEvacuationMode mode>
1386 171100 : class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
1387 : public:
1388 : explicit EvacuateNewSpacePageVisitor(
1389 : Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1390 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1391 : : heap_(heap),
1392 : record_visitor_(record_visitor),
1393 : moved_bytes_(0),
1394 171100 : local_pretenuring_feedback_(local_pretenuring_feedback) {}
1395 :
1396 2047 : static void Move(Page* page) {
1397 : switch (mode) {
1398 : case NEW_TO_NEW:
1399 2047 : page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1400 : page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1401 : break;
1402 : case NEW_TO_OLD: {
1403 582 : page->heap()->new_space()->from_space().RemovePage(page);
1404 582 : Page* new_page = Page::ConvertNewToOld(page);
1405 : DCHECK(!new_page->InNewSpace());
1406 : new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1407 : break;
1408 : }
1409 : }
1410 2047 : }
1411 :
1412 4731611 : inline bool Visit(HeapObject object, int size) override {
1413 : if (mode == NEW_TO_NEW) {
1414 4731611 : heap_->UpdateAllocationSite(object->map(), object,
1415 9463222 : local_pretenuring_feedback_);
1416 : } else if (mode == NEW_TO_OLD) {
1417 2189394 : object->IterateBodyFast(record_visitor_);
1418 : }
1419 4706620 : return true;
1420 : }
1421 :
1422 : intptr_t moved_bytes() { return moved_bytes_; }
1423 2047 : void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1424 :
1425 : private:
1426 : Heap* heap_;
1427 : RecordMigratedSlotVisitor* record_visitor_;
1428 : intptr_t moved_bytes_;
1429 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1430 : };
1431 :
1432 85550 : class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
1433 : public:
1434 : EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
1435 : RecordMigratedSlotVisitor* record_visitor)
1436 85550 : : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
1437 :
1438 33817935 : inline bool Visit(HeapObject object, int size) override {
1439 33817935 : HeapObject target_object;
1440 33817935 : if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
1441 33817935 : object, size, &target_object)) {
1442 : DCHECK(object->map_word().IsForwardingAddress());
1443 : return true;
1444 : }
1445 45 : return false;
1446 : }
1447 : };
1448 :
1449 45 : class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
1450 : public:
1451 45 : explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
1452 :
1453 0 : inline bool Visit(HeapObject object, int size) override {
1454 2590 : RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1455 2590 : object->IterateBodyFast(&visitor);
1456 0 : return true;
1457 : }
1458 :
1459 : private:
1460 : Heap* heap_;
1461 : };
1462 :
1463 12379928 : bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
1464 12379928 : Object o = *p;
1465 12379928 : if (!o->IsHeapObject()) return false;
1466 : HeapObject heap_object = HeapObject::cast(o);
1467 : return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
1468 12379928 : heap_object);
1469 : }
1470 :
1471 83492 : void MarkCompactCollector::MarkStringTable(
1472 : ObjectVisitor* custom_root_body_visitor) {
1473 83492 : StringTable string_table = heap()->string_table();
1474 : // Mark the string table itself.
1475 83492 : if (marking_state()->WhiteToBlack(string_table)) {
1476 : // Explicitly mark the prefix.
1477 83116 : string_table->IteratePrefix(custom_root_body_visitor);
1478 : }
1479 83492 : }
1480 :
1481 83492 : void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
1482 : ObjectVisitor* custom_root_body_visitor) {
1483 : // Mark the heap roots including global variables, stack variables,
1484 : // etc., and all objects reachable from them.
1485 83492 : heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);
1486 :
1487 : // Custom marking for string table and top optimized frame.
1488 83492 : MarkStringTable(custom_root_body_visitor);
1489 83492 : ProcessTopOptimizedFrame(custom_root_body_visitor);
1490 83492 : }
1491 :
1492 166984 : void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
1493 : bool work_to_do = true;
1494 : int iterations = 0;
1495 166984 : int max_iterations = FLAG_ephemeron_fixpoint_iterations;
1496 :
1497 500999 : while (work_to_do) {
1498 167031 : PerformWrapperTracing();
1499 :
1500 167031 : if (iterations >= max_iterations) {
1501 :       // Give up fixpoint iteration and switch to the linear algorithm.
1502 0 : ProcessEphemeronsLinear();
1503 0 : break;
1504 : }
1505 :
1506 : // Move ephemerons from next_ephemerons into current_ephemerons to
1507 : // drain them in this iteration.
1508 167031 : weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1509 668065 : heap()->concurrent_marking()->set_ephemeron_marked(false);
1510 :
1511 : {
1512 668124 : TRACE_GC(heap()->tracer(),
1513 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1514 :
1515 167031 : if (FLAG_parallel_marking) {
1516 165423 : heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1517 : }
1518 :
1519 167031 : work_to_do = ProcessEphemerons();
1520 : FinishConcurrentMarking(
1521 334062 : ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
1522 : }
1523 :
1524 167031 : CHECK(weak_objects_.current_ephemerons.IsEmpty());
1525 167031 : CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1526 :
1527 334038 : work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
1528 334003 : heap()->concurrent_marking()->ephemeron_marked() ||
1529 500999 : !marking_worklist()->IsEmbedderEmpty() ||
1530 166984 : !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1531 167031 : ++iterations;
1532 : }
1533 :
1534 166984 : CHECK(marking_worklist()->IsEmpty());
1535 166984 : CHECK(weak_objects_.current_ephemerons.IsEmpty());
1536 166984 : CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1537 166984 : }
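// In outline, the fixpoint above repeats until nothing new is discovered:
// trace through the embedder heap, drain the ephemeron worklists (an
// ephemeron's value is only marked once its key is marked), and drain the
// regular marking worklist, also picking up anything found by concurrent
// markers. A rough sketch of one iteration:
//
//   PerformWrapperTracing();
//   work_to_do = ProcessEphemerons();          // may push new grey objects
//   work_to_do |= !marking_worklist()->IsEmpty() || ...;
//
// Every iteration can only turn white objects grey or black, so the loop
// terminates; FLAG_ephemeron_fixpoint_iterations bounds the number of rounds
// before falling back to ProcessEphemeronsLinear().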
1538 :
1539 167031 : bool MarkCompactCollector::ProcessEphemerons() {
1540 167031 : Ephemeron ephemeron;
1541 : bool ephemeron_marked = false;
1542 :
1543 :   // Drain current_ephemerons and push ephemerons whose key and value are still
1544 :   // unreachable into next_ephemerons.
1545 334072 : while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
1546 10 : if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
1547 : ephemeron_marked = true;
1548 : }
1549 : }
1550 :
1551 : // Drain marking worklist and push discovered ephemerons into
1552 : // discovered_ephemerons.
1553 : ProcessMarkingWorklist();
1554 :
1555 :   // Drain discovered_ephemerons (filled while draining the marking worklist
1556 :   // above) and push ephemerons whose key and value are still unreachable into
1557 :   // next_ephemerons.
1558 167116 : while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
1559 85 : if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
1560 : ephemeron_marked = true;
1561 : }
1562 : }
1563 :
1564 : // Flush local ephemerons for main task to global pool.
1565 167031 : weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThread);
1566 167031 : weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
1567 :
1568 167031 : return ephemeron_marked;
1569 : }
1570 :
1571 0 : void MarkCompactCollector::ProcessEphemeronsLinear() {
1572 0 : TRACE_GC(heap()->tracer(),
1573 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
1574 0 : CHECK(heap()->concurrent_marking()->IsStopped());
1575 0 : std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
1576 0 : Ephemeron ephemeron;
1577 :
1578 : DCHECK(weak_objects_.current_ephemerons.IsEmpty());
1579 0 : weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1580 :
1581 0 : while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
1582 0 : VisitEphemeron(ephemeron.key, ephemeron.value);
1583 :
1584 0 : if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1585 0 : key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1586 : }
1587 : }
1588 :
1589 0 : ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1590 : bool work_to_do = true;
1591 :
1592 0 : while (work_to_do) {
1593 0 : PerformWrapperTracing();
1594 :
1595 : ResetNewlyDiscovered();
1596 0 : ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1597 :
1598 : {
1599 0 : TRACE_GC(heap()->tracer(),
1600 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1601 : // Drain marking worklist and push all discovered objects into
1602 : // newly_discovered.
1603 : ProcessMarkingWorklistInternal<
1604 : MarkCompactCollector::MarkingWorklistProcessingMode::
1605 0 : kTrackNewlyDiscoveredObjects>();
1606 : }
1607 :
1608 0 : while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
1609 0 : VisitEphemeron(ephemeron.key, ephemeron.value);
1610 :
1611 0 : if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1612 0 : key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1613 : }
1614 : }
1615 :
1616 0 : if (ephemeron_marking_.newly_discovered_overflowed) {
1617 :       // If newly_discovered overflowed, just visit all ephemerons in
1618 :       // next_ephemerons.
1619 0 : weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
1620 0 : if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
1621 : non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
1622 0 : marking_worklist()->Push(ephemeron.value);
1623 : }
1624 0 : });
1625 :
1626 : } else {
1627 : // This is the good case: newly_discovered stores all discovered
1628 : // objects. Now use key_to_values to see if discovered objects keep more
1629 : // objects alive due to ephemeron semantics.
1630 0 : for (HeapObject object : ephemeron_marking_.newly_discovered) {
1631 : auto range = key_to_values.equal_range(object);
1632 0 : for (auto it = range.first; it != range.second; ++it) {
1633 0 : HeapObject value = it->second;
1634 : MarkObject(object, value);
1635 : }
1636 : }
1637 : }
1638 :
1639 :     // Do NOT drain the marking worklist here; otherwise the checks above for
1640 :     // work_to_do are not sufficient for determining whether another iteration
1641 :     // is necessary.
1642 :
1643 0 : work_to_do = !marking_worklist()->IsEmpty() ||
1644 0 : !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1645 0 : CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1646 : }
1647 :
1648 : ResetNewlyDiscovered();
1649 0 : ephemeron_marking_.newly_discovered.shrink_to_fit();
1650 :
1651 0 : CHECK(marking_worklist()->IsEmpty());
1652 0 : }
1653 :
1654 250523 : void MarkCompactCollector::PerformWrapperTracing() {
1655 250763 : if (heap_->local_embedder_heap_tracer()->InUse()) {
1656 480 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
1657 : {
1658 : LocalEmbedderHeapTracer::ProcessingScope scope(
1659 240 : heap_->local_embedder_heap_tracer());
1660 120 : HeapObject object;
1661 270 : while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
1662 30 : scope.TracePossibleWrapper(JSObject::cast(object));
1663 120 : }
1664 : }
1665 : heap_->local_embedder_heap_tracer()->Trace(
1666 360 : std::numeric_limits<double>::infinity());
1667 : }
1668 250523 : }
1669 :
1670 0 : void MarkCompactCollector::ProcessMarkingWorklist() {
1671 : ProcessMarkingWorklistInternal<
1672 584491 : MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>();
1673 0 : }
1674 :
1675 : template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
1676 584491 : void MarkCompactCollector::ProcessMarkingWorklistInternal() {
1677 : HeapObject object;
1678 584491 : MarkCompactMarkingVisitor visitor(this, marking_state());
1679 157652354 : while (!(object = marking_worklist()->Pop()).is_null()) {
1680 : DCHECK(!object->IsFiller());
1681 : DCHECK(object->IsHeapObject());
1682 : DCHECK(heap()->Contains(object));
1683 : DCHECK(!(marking_state()->IsWhite(object)));
1684 : marking_state()->GreyToBlack(object);
1685 : if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
1686 : kTrackNewlyDiscoveredObjects) {
1687 0 : AddNewlyDiscovered(object);
1688 : }
1689 : Map map = object->map();
1690 : MarkObject(object, map);
1691 : visitor.Visit(map, object);
1692 : }
1693 584491 : }
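// The loop above computes the transitive marking closure: each popped object
// is turned from grey to black, its map is marked, and visiting its body
// pushes any newly greyed references back onto the worklist.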
1694 :
1695 95 : bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {
1696 95 : if (marking_state()->IsBlackOrGrey(key)) {
1697 16 : if (marking_state()->WhiteToGrey(value)) {
1698 : marking_worklist()->Push(value);
1699 16 : return true;
1700 : }
1701 :
1702 79 : } else if (marking_state()->IsWhite(value)) {
1703 79 : weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value});
1704 : }
1705 :
1706 : return false;
1707 : }
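// Case summary for VisitEphemeron: a marked key with a white value marks the
// value and reports progress; a white key with a white value defers the
// ephemeron to next_ephemerons for a later fixpoint iteration; an already
// marked value needs no further work.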
1708 :
1709 166984 : void MarkCompactCollector::ProcessEphemeronMarking() {
1710 : DCHECK(marking_worklist()->IsEmpty());
1711 :
1712 :   // Incremental marking might leave ephemerons in the main task's local
1713 :   // buffer; flush them into the global pool.
1714 166984 : weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
1715 :
1716 166984 : ProcessEphemeronsUntilFixpoint();
1717 :
1718 166984 : CHECK(marking_worklist()->IsEmpty());
1719 166984 : CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
1720 166984 : }
1721 :
1722 83492 : void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1723 259357 : for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1724 92373 : !it.done(); it.Advance()) {
1725 141031 : if (it.frame()->type() == StackFrame::INTERPRETED) {
1726 : return;
1727 : }
1728 102268 : if (it.frame()->type() == StackFrame::OPTIMIZED) {
1729 9895 : Code code = it.frame()->LookupCode();
1730 19790 : if (!code->CanDeoptAt(it.frame()->pc())) {
1731 1499 : Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
1732 : }
1733 : return;
1734 : }
1735 : }
1736 : }
1737 :
1738 83492 : void MarkCompactCollector::RecordObjectStats() {
1739 83492 : if (V8_UNLIKELY(FLAG_gc_stats)) {
1740 0 : heap()->CreateObjectStats();
1741 : ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
1742 0 : heap()->dead_object_stats_);
1743 0 : collector.Collect();
1744 0 : if (V8_UNLIKELY(FLAG_gc_stats &
1745 : v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
1746 0 : std::stringstream live, dead;
1747 0 : heap()->live_object_stats_->Dump(live);
1748 0 : heap()->dead_object_stats_->Dump(dead);
1749 0 : TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
1750 : "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
1751 : "live", TRACE_STR_COPY(live.str().c_str()), "dead",
1752 0 : TRACE_STR_COPY(dead.str().c_str()));
1753 : }
1754 0 : if (FLAG_trace_gc_object_stats) {
1755 0 : heap()->live_object_stats_->PrintJSON("live");
1756 0 : heap()->dead_object_stats_->PrintJSON("dead");
1757 : }
1758 0 : heap()->live_object_stats_->CheckpointObjectStats();
1759 0 : heap()->dead_object_stats_->ClearObjectStats();
1760 : }
1761 83492 : }
1762 :
1763 83492 : void MarkCompactCollector::MarkLiveObjects() {
1764 1362944 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
1765 : // The recursive GC marker detects when it is nearing stack overflow,
1766 : // and switches to a different marking system. JS interrupts interfere
1767 : // with the C stack limit check.
1768 : PostponeInterruptsScope postpone(isolate());
1769 :
1770 : {
1771 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
1772 333164 : IncrementalMarking* incremental_marking = heap_->incremental_marking();
1773 83492 : if (was_marked_incrementally_) {
1774 27072 : incremental_marking->Finalize();
1775 : } else {
1776 56420 : CHECK(incremental_marking->IsStopped());
1777 83492 : }
1778 : }
1779 :
1780 : #ifdef DEBUG
1781 : DCHECK(state_ == PREPARE_GC);
1782 : state_ = MARK_LIVE_OBJECTS;
1783 : #endif
1784 :
1785 166984 : heap_->local_embedder_heap_tracer()->EnterFinalPause();
1786 :
1787 : RootMarkingVisitor root_visitor(this);
1788 :
1789 : {
1790 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
1791 : CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
1792 166984 : MarkRoots(&root_visitor, &custom_root_body_visitor);
1793 : }
1794 :
1795 : {
1796 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
1797 83492 : if (FLAG_parallel_marking) {
1798 165376 : heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1799 : }
1800 : ProcessMarkingWorklist();
1801 :
1802 : FinishConcurrentMarking(
1803 83492 : ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
1804 83492 : ProcessMarkingWorklist();
1805 : }
1806 :
1807 : {
1808 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
1809 :
1810 : DCHECK(marking_worklist()->IsEmpty());
1811 :
1812 : // Mark objects reachable through the embedder heap. This phase is
1813 : // opportunistic as it may not discover graphs that are only reachable
1814 : // through ephemerons.
1815 : {
1816 333968 : TRACE_GC(heap()->tracer(),
1817 : GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
1818 83492 : do {
1819 : // PerformWrapperTracing() also empties the work items collected by
1820 : // concurrent markers. As a result this call needs to happen at least
1821 : // once.
1822 83492 : PerformWrapperTracing();
1823 : ProcessMarkingWorklist();
1824 250476 : } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
1825 83492 : !marking_worklist()->IsEmbedderEmpty());
1826 : DCHECK(marking_worklist()->IsEmbedderEmpty());
1827 83492 : DCHECK(marking_worklist()->IsEmpty());
1828 : }
1829 :
1830 : // The objects reachable from the roots are marked, yet unreachable objects
1831 : // are unmarked. Mark objects reachable due to embedder heap tracing or
1832 : // harmony weak maps.
1833 : {
1834 333968 : TRACE_GC(heap()->tracer(),
1835 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
1836 83492 : ProcessEphemeronMarking();
1837 83492 : DCHECK(marking_worklist()->IsEmpty());
1838 : }
1839 :
1840 : // The objects reachable from the roots, weak maps, and embedder heap
1841 : // tracing are marked. Objects pointed to only by weak global handles cannot
1842 : // be immediately reclaimed. Instead, we have to mark them as pending and
1843 : // mark objects reachable from them.
1844 : //
1845 : // First we identify nonlive weak handles and mark them as pending
1846 : // destruction.
1847 : {
1848 333968 : TRACE_GC(heap()->tracer(),
1849 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
1850 : heap()->isolate()->global_handles()->IdentifyWeakHandles(
1851 83492 : &IsUnmarkedHeapObject);
1852 83492 : ProcessMarkingWorklist();
1853 : }
1854 :
1855 : // Process finalizers, effectively keeping them alive until the next
1856 : // garbage collection.
1857 : {
1858 333968 : TRACE_GC(heap()->tracer(),
1859 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
1860 : heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
1861 83492 : &root_visitor);
1862 83492 : ProcessMarkingWorklist();
1863 : }
1864 :
1865 : // Repeat ephemeron processing from the newly marked objects.
1866 : {
1867 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
1868 83492 : ProcessEphemeronMarking();
1869 : DCHECK(marking_worklist()->IsEmbedderEmpty());
1870 83492 : DCHECK(marking_worklist()->IsEmpty());
1871 : }
1872 :
1873 : {
1874 : heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
1875 83492 : &IsUnmarkedHeapObject);
1876 83492 : }
1877 : }
1878 :
1879 83492 : if (was_marked_incrementally_) {
1880 27072 : heap()->incremental_marking()->Deactivate();
1881 83492 : }
1882 83492 : }
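// Marking thus proceeds in the order seen above: finish or stop incremental
// marking, mark the strong roots, drain the worklist (with concurrent markers
// when enabled), close over the embedder graph, close over ephemerons,
// identify dead weak global handles, retain finalizer targets for one more
// cycle, redo the ephemeron closure for the newly marked objects, and finally
// process phantom handles.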
1883 :
1884 83492 : void MarkCompactCollector::ClearNonLiveReferences() {
1885 1168888 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
1886 :
1887 : {
1888 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
1889 :
1890 :     // Prune the string table, removing all strings only pointed to by the
1891 :     // string table itself. Cannot use string_table() here because the string
1892 :     // table is marked.
1893 83492 : StringTable string_table = heap()->string_table();
1894 : InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
1895 83492 : string_table->IterateElements(&internalized_visitor);
1896 83492 : string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
1897 :
1898 : ExternalStringTableCleaner external_visitor(heap());
1899 83492 : heap()->external_string_table_.IterateAll(&external_visitor);
1900 166984 : heap()->external_string_table_.CleanUpAll();
1901 : }
1902 :
1903 : {
1904 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
1905 166984 : ClearOldBytecodeCandidates();
1906 : }
1907 :
1908 : {
1909 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
1910 : // Process the weak references.
1911 : MarkCompactWeakObjectRetainer mark_compact_object_retainer(
1912 83492 : non_atomic_marking_state());
1913 166984 : heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
1914 : }
1915 :
1916 : {
1917 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
1918 : // ClearFullMapTransitions must be called before weak references are
1919 : // cleared.
1920 166984 : ClearFullMapTransitions();
1921 : }
1922 : {
1923 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
1924 83492 : ClearWeakReferences();
1925 83492 : ClearWeakCollections();
1926 166984 : ClearJSWeakCells();
1927 : }
1928 :
1929 83492 : MarkDependentCodeForDeoptimization();
1930 :
1931 : DCHECK(weak_objects_.transition_arrays.IsEmpty());
1932 : DCHECK(weak_objects_.weak_references.IsEmpty());
1933 : DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
1934 : DCHECK(weak_objects_.js_weak_refs.IsEmpty());
1935 : DCHECK(weak_objects_.js_weak_cells.IsEmpty());
1936 83492 : DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
1937 83492 : }
1938 :
1939 83492 : void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
1940 83492 : std::pair<HeapObject, Code> weak_object_in_code;
1941 390620 : while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
1942 307128 : &weak_object_in_code)) {
1943 223636 : HeapObject object = weak_object_in_code.first;
1944 223636 : Code code = weak_object_in_code.second;
1945 224432 : if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
1946 796 : !code->embedded_objects_cleared()) {
1947 282 : if (!code->marked_for_deoptimization()) {
1948 64 : code->SetMarkedForDeoptimization("weak objects");
1949 64 : have_code_to_deoptimize_ = true;
1950 : }
1951 282 : code->ClearEmbeddedObjects(heap_);
1952 : DCHECK(code->embedded_objects_cleared());
1953 : }
1954 : }
1955 83492 : }
1956 :
1957 367170 : void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
1958 : DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
1959 367170 : Object potential_parent = dead_target->constructor_or_backpointer();
1960 367170 : if (potential_parent->IsMap()) {
1961 : Map parent = Map::cast(potential_parent);
1962 : DisallowHeapAllocation no_gc_obviously;
1963 840438 : if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
1964 : TransitionsAccessor(isolate(), parent, &no_gc_obviously)
1965 441830 : .HasSimpleTransitionTo(dead_target)) {
1966 18545 : ClearPotentialSimpleMapTransition(parent, dead_target);
1967 : }
1968 : }
1969 367170 : }
1970 :
1971 18545 : void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
1972 : Map dead_target) {
1973 : DCHECK(!map->is_prototype_map());
1974 : DCHECK(!dead_target->is_prototype_map());
1975 : DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
1976 : // Take ownership of the descriptor array.
1977 : int number_of_own_descriptors = map->NumberOfOwnDescriptors();
1978 18545 : DescriptorArray descriptors = map->instance_descriptors();
1979 37090 : if (descriptors == dead_target->instance_descriptors() &&
1980 : number_of_own_descriptors > 0) {
1981 4174 : TrimDescriptorArray(map, descriptors);
1982 : DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
1983 : }
1984 18545 : }
1985 :
1986 111 : void MarkCompactCollector::FlushBytecodeFromSFI(
1987 : SharedFunctionInfo shared_info) {
1988 : DCHECK(shared_info->HasBytecodeArray());
1989 :
1990 : // Retain objects required for uncompiled data.
1991 111 : String inferred_name = shared_info->inferred_name();
1992 111 : int start_position = shared_info->StartPosition();
1993 111 : int end_position = shared_info->EndPosition();
1994 444 : int function_literal_id = shared_info->FunctionLiteralId(isolate());
1995 :
1996 : shared_info->DiscardCompiledMetadata(
1997 111 : isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
1998 : RecordSlot(object, slot, target);
1999 333 : });
2000 :
2001 :   // The size of the bytecode array should always be at least as large as an
2002 :   // UncompiledData object.
2003 : STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
2004 : UncompiledDataWithoutPreparseData::kSize);
2005 :
2006 : // Replace bytecode array with an uncompiled data array.
2007 111 : HeapObject compiled_data = shared_info->GetBytecodeArray();
2008 : Address compiled_data_start = compiled_data->address();
2009 111 : int compiled_data_size = compiled_data->Size();
2010 : MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
2011 :
2012 :   // Clear any recorded slots for the compiled data, as they are now invalid.
2013 : RememberedSet<OLD_TO_NEW>::RemoveRange(
2014 : chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2015 111 : SlotSet::PREFREE_EMPTY_BUCKETS);
2016 : RememberedSet<OLD_TO_OLD>::RemoveRange(
2017 : chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2018 111 : SlotSet::PREFREE_EMPTY_BUCKETS);
2019 :
2020 :   // Swap the map, using set_map_after_allocation to avoid heap verification
2021 :   // checks, which are unnecessary since this happens during the GC atomic pause.
2022 : compiled_data->set_map_after_allocation(
2023 : ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
2024 111 : SKIP_WRITE_BARRIER);
2025 :
2026 :   // Create a filler object for any leftover space in the bytecode array.
2027 111 : if (!heap()->IsLargeObject(compiled_data)) {
2028 : heap()->CreateFillerObjectAt(
2029 : compiled_data->address() + UncompiledDataWithoutPreparseData::kSize,
2030 : compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
2031 333 : ClearRecordedSlots::kNo);
2032 : }
2033 :
2034 : // Initialize the uncompiled data.
2035 : UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
2036 : UncompiledData::Initialize(
2037 : uncompiled_data, inferred_name, start_position, end_position,
2038 : function_literal_id,
2039 111 : [](HeapObject object, ObjectSlot slot, HeapObject target) {
2040 : RecordSlot(object, slot, target);
2041 333 : });
2042 :
2043 : // Mark the uncompiled data as black, and ensure all fields have already been
2044 : // marked.
2045 : DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
2046 : non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
2047 :
2048 : // Use the raw function data setter to avoid validity checks, since we're
2049 : // performing the unusual task of decompiling.
2050 111 : shared_info->set_function_data(uncompiled_data);
2051 : DCHECK(!shared_info->is_compiled());
2052 111 : }
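// Bytecode flushing converts the BytecodeArray in place: recorded slots
// covering it are removed, its map is swapped to the uncompiled-data map, the
// unused tail becomes a filler object, and the SharedFunctionInfo's function
// data is redirected to the resulting UncompiledData. The STATIC_ASSERT above
// guarantees that the replacement object fits inside the old one.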
2053 :
2054 83492 : void MarkCompactCollector::ClearOldBytecodeCandidates() {
2055 : DCHECK(FLAG_flush_bytecode ||
2056 : weak_objects_.bytecode_flushing_candidates.IsEmpty());
2057 83492 : SharedFunctionInfo flushing_candidate;
2058 167155 : while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThread,
2059 83663 : &flushing_candidate)) {
2060 : // If the BytecodeArray is dead, flush it, which will replace the field with
2061 : // an uncompiled data object.
2062 171 : if (!non_atomic_marking_state()->IsBlackOrGrey(
2063 342 : flushing_candidate->GetBytecodeArray())) {
2064 111 : FlushBytecodeFromSFI(flushing_candidate);
2065 : }
2066 :
2067 :     // Now record the slot, which has either been updated to an uncompiled data
2068 :     // object or still holds the BytecodeArray, which is alive.
2069 : ObjectSlot slot = HeapObject::RawField(
2070 : flushing_candidate, SharedFunctionInfo::kFunctionDataOffset);
2071 : RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
2072 : }
2073 83492 : }
2074 :
2075 83492 : void MarkCompactCollector::ClearFullMapTransitions() {
2076 83492 : TransitionArray array;
2077 971271 : while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
2078 : int num_transitions = array->number_of_entries();
2079 804287 : if (num_transitions > 0) {
2080 607942 : Map map;
2081 : // The array might contain "undefined" elements because it's not yet
2082 : // filled. Allow it.
2083 607942 : if (array->GetTargetIfExists(0, isolate(), &map)) {
2084 : DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
2085 1215884 : Map parent = Map::cast(map->constructor_or_backpointer());
2086 : bool parent_is_alive =
2087 : non_atomic_marking_state()->IsBlackOrGrey(parent);
2088 : DescriptorArray descriptors = parent_is_alive
2089 : ? parent->instance_descriptors()
2090 607942 : : DescriptorArray();
2091 : bool descriptors_owner_died =
2092 607942 : CompactTransitionArray(parent, array, descriptors);
2093 607942 : if (descriptors_owner_died) {
2094 2990 : TrimDescriptorArray(parent, descriptors);
2095 : }
2096 : }
2097 : }
2098 : }
2099 83492 : }
2100 :
2101 607942 : bool MarkCompactCollector::CompactTransitionArray(Map map,
2102 : TransitionArray transitions,
2103 : DescriptorArray descriptors) {
2104 : DCHECK(!map->is_prototype_map());
2105 : int num_transitions = transitions->number_of_entries();
2106 : bool descriptors_owner_died = false;
2107 : int transition_index = 0;
2108 : // Compact all live transitions to the left.
2109 1433527 : for (int i = 0; i < num_transitions; ++i) {
2110 825585 : Map target = transitions->GetTarget(i);
2111 : DCHECK_EQ(target->constructor_or_backpointer(), map);
2112 825585 : if (non_atomic_marking_state()->IsWhite(target)) {
2113 219027 : if (!descriptors.is_null() &&
2114 219027 : target->instance_descriptors() == descriptors) {
2115 : DCHECK(!target->is_prototype_map());
2116 : descriptors_owner_died = true;
2117 : }
2118 : } else {
2119 752576 : if (i != transition_index) {
2120 25233 : Name key = transitions->GetKey(i);
2121 : transitions->SetKey(transition_index, key);
2122 : HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
2123 : RecordSlot(transitions, key_slot, key);
2124 25233 : MaybeObject raw_target = transitions->GetRawTarget(i);
2125 : transitions->SetRawTarget(transition_index, raw_target);
2126 : HeapObjectSlot target_slot =
2127 : transitions->GetTargetSlot(transition_index);
2128 25233 : RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
2129 : }
2130 752576 : transition_index++;
2131 : }
2132 : }
2133 : // If there are no transitions to be cleared, return.
2134 607942 : if (transition_index == num_transitions) {
2135 : DCHECK(!descriptors_owner_died);
2136 : return false;
2137 : }
2138 : // Note that we never eliminate a transition array, though we might right-trim
2139 : // such that number_of_transitions() == 0. If this assumption changes,
2140 : // TransitionArray::Insert() will need to deal with the case that a transition
2141 : // array disappeared during GC.
2142 17650 : int trim = transitions->Capacity() - transition_index;
2143 17650 : if (trim > 0) {
2144 : heap_->RightTrimWeakFixedArray(transitions,
2145 17650 : trim * TransitionArray::kEntrySize);
2146 : transitions->SetNumberOfTransitions(transition_index);
2147 : }
2148 17650 : return descriptors_owner_died;
2149 : }
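// Illustration of the compaction above (t = live target, x = dead target):
//
//   before: [t0][x][t1][x][t2]    number_of_entries() == 5
//   after:  [t0][t1][t2]          transition_index == 3, then right-trimmed
//
// Key and target slots of moved entries are re-recorded so that the
// evacuation phase can still update them.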
2150 :
2151 6146 : void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
2152 : int descriptors_to_trim) {
2153 6146 : int old_nof_all_descriptors = array->number_of_all_descriptors();
2154 6146 : int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
2155 : DCHECK_LT(0, descriptors_to_trim);
2156 : DCHECK_LE(0, new_nof_all_descriptors);
2157 : Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
2158 : Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
2159 : RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
2160 : start, end,
2161 6146 : SlotSet::PREFREE_EMPTY_BUCKETS);
2162 : RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
2163 : start, end,
2164 6146 : SlotSet::PREFREE_EMPTY_BUCKETS);
2165 : heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
2166 6146 : ClearRecordedSlots::kNo);
2167 6146 : array->set_number_of_all_descriptors(new_nof_all_descriptors);
2168 6146 : }
2169 :
2170 7164 : void MarkCompactCollector::TrimDescriptorArray(Map map,
2171 : DescriptorArray descriptors) {
2172 : int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2173 7164 : if (number_of_own_descriptors == 0) {
2174 : DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
2175 7164 : return;
2176 : }
2177 : // TODO(ulan): Trim only if slack is greater than some percentage threshold.
2178 : int to_trim =
2179 7087 : descriptors->number_of_all_descriptors() - number_of_own_descriptors;
2180 7087 : if (to_trim > 0) {
2181 6146 : descriptors->set_number_of_descriptors(number_of_own_descriptors);
2182 6146 : RightTrimDescriptorArray(descriptors, to_trim);
2183 :
2184 6146 : TrimEnumCache(map, descriptors);
2185 6146 : descriptors->Sort();
2186 :
2187 : if (FLAG_unbox_double_fields) {
2188 6146 : LayoutDescriptor layout_descriptor = map->layout_descriptor();
2189 : layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2190 6146 : number_of_own_descriptors);
2191 : SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2192 : }
2193 : }
2194 : DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2195 7087 : map->set_owns_descriptors(true);
2196 : }
2197 :
2198 6146 : void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
2199 : int live_enum = map->EnumLength();
2200 6146 : if (live_enum == kInvalidEnumCacheSentinel) {
2201 6097 : live_enum = map->NumberOfEnumerableProperties();
2202 : }
2203 12219 : if (live_enum == 0) return descriptors->ClearEnumCache();
2204 6084 : EnumCache enum_cache = descriptors->enum_cache();
2205 :
2206 6084 : FixedArray keys = enum_cache->keys();
2207 6084 : int to_trim = keys->length() - live_enum;
2208 6084 : if (to_trim <= 0) return;
2209 83 : heap_->RightTrimFixedArray(keys, to_trim);
2210 :
2211 83 : FixedArray indices = enum_cache->indices();
2212 83 : to_trim = indices->length() - live_enum;
2213 83 : if (to_trim <= 0) return;
2214 73 : heap_->RightTrimFixedArray(indices, to_trim);
2215 : }
2216 :
2217 83492 : void MarkCompactCollector::ClearWeakCollections() {
2218 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2219 83492 : EphemeronHashTable table;
2220 :
2221 175259 : while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
2222 39076 : for (int i = 0; i < table->Capacity(); i++) {
2223 39076 : HeapObject key = HeapObject::cast(table->KeyAt(i));
2224 : #ifdef VERIFY_HEAP
2225 : Object value = table->ValueAt(i);
2226 :
2227 : if (value->IsHeapObject()) {
2228 : CHECK_IMPLIES(
2229 : non_atomic_marking_state()->IsBlackOrGrey(key),
2230 : non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
2231 : }
2232 : #endif
2233 39076 : if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
2234 78 : table->RemoveEntry(i);
2235 : }
2236 : }
2237 83492 : }
2238 83492 : }
2239 :
2240 83492 : void MarkCompactCollector::ClearWeakReferences() {
2241 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2242 : std::pair<HeapObject, HeapObjectSlot> slot;
2243 : HeapObjectReference cleared_weak_ref =
2244 : HeapObjectReference::ClearedValue(isolate());
2245 33386335 : while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
2246 33219351 : HeapObject value;
2247 : // The slot could have been overwritten, so we have to treat it
2248 : // as MaybeObjectSlot.
2249 : MaybeObjectSlot location(slot.second);
2250 33219351 : if ((*location)->GetHeapObjectIfWeak(&value)) {
2251 : DCHECK(!value->IsCell());
2252 33193381 : if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
2253 : // The value of the weak reference is alive.
2254 : RecordSlot(slot.first, HeapObjectSlot(location), value);
2255 : } else {
2256 3038010 : if (value->IsMap()) {
2257 : // The map is non-live.
2258 367170 : ClearPotentialSimpleMapTransition(Map::cast(value));
2259 : }
2260 : location.store(cleared_weak_ref);
2261 : }
2262 : }
2263 83492 : }
2264 83492 : }
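// Each recorded weak slot is thus either kept (and re-recorded for pointer
// updating) when its target survived, or overwritten with the cleared weak
// reference sentinel; a dead Map target additionally has its potential simple
// transition pruned from the parent map.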
2265 :
2266 83492 : void MarkCompactCollector::ClearJSWeakCells() {
2267 83492 : if (!FLAG_harmony_weak_refs) {
2268 83105 : return;
2269 : }
2270 387 : JSWeakRef weak_ref;
2271 898 : while (weak_objects_.js_weak_refs.Pop(kMainThread, &weak_ref)) {
2272 :     // We do not insert cleared JSWeakRefs into the list, so the target
2273 :     // cannot be undefined here.
2274 124 : JSReceiver target = JSReceiver::cast(weak_ref->target());
2275 124 : if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
2276 272 : weak_ref->set_target(ReadOnlyRoots(isolate()).undefined_value());
2277 : } else {
2278 : // The value of the JSWeakRef is alive.
2279 : ObjectSlot slot =
2280 : HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
2281 : RecordSlot(weak_ref, slot, target);
2282 : }
2283 : }
2284 387 : JSWeakCell weak_cell;
2285 1041 : while (weak_objects_.js_weak_cells.Pop(kMainThread, &weak_cell)) {
2286 : // We do not insert cleared weak cells into the list, so the value
2287 : // cannot be a Smi here.
2288 267 : HeapObject target = HeapObject::cast(weak_cell->target());
2289 267 : if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
2290 : // The value of the JSWeakCell is dead.
2291 504 : JSWeakFactory weak_factory = JSWeakFactory::cast(weak_cell->factory());
2292 252 : if (!weak_factory->scheduled_for_cleanup()) {
2293 : heap()->AddDirtyJSWeakFactory(
2294 : weak_factory,
2295 198 : [](HeapObject object, ObjectSlot slot, Object target) {
2296 198 : if (target->IsHeapObject()) {
2297 : RecordSlot(object, slot, HeapObject::cast(target));
2298 : }
2299 594 : });
2300 : }
2301 :       // We're modifying the pointers in JSWeakCell and JSWeakFactory during GC;
2302 :       // thus we need to record the slots that are written. The normal write
2303 :       // barrier is not enough, since it is disabled before GC.
2304 : weak_cell->Nullify(isolate(),
2305 820 : [](HeapObject object, ObjectSlot slot, Object target) {
2306 820 : if (target->IsHeapObject()) {
2307 : RecordSlot(object, slot, HeapObject::cast(target));
2308 : }
2309 1324 : });
2310 : DCHECK(weak_factory->NeedsCleanup());
2311 : DCHECK(weak_factory->scheduled_for_cleanup());
2312 : } else {
2313 : // The value of the JSWeakCell is alive.
2314 : ObjectSlot slot =
2315 : HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
2316 : RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
2317 : }
2318 : }
2319 : }
2320 :
2321 62867 : void MarkCompactCollector::AbortWeakObjects() {
2322 62867 : weak_objects_.transition_arrays.Clear();
2323 62868 : weak_objects_.ephemeron_hash_tables.Clear();
2324 62868 : weak_objects_.current_ephemerons.Clear();
2325 62868 : weak_objects_.next_ephemerons.Clear();
2326 62868 : weak_objects_.discovered_ephemerons.Clear();
2327 62868 : weak_objects_.weak_references.Clear();
2328 62868 : weak_objects_.weak_objects_in_code.Clear();
2329 62868 : weak_objects_.js_weak_refs.Clear();
2330 62868 : weak_objects_.js_weak_cells.Clear();
2331 62868 : weak_objects_.bytecode_flushing_candidates.Clear();
2332 62868 : }
2333 :
2334 0 : bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
2335 0 : return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
2336 : }
2337 :
2338 : MarkCompactCollector::RecordRelocSlotInfo
2339 6962194 : MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
2340 : HeapObject target) {
2341 : RecordRelocSlotInfo result;
2342 6728271 : result.should_record = false;
2343 : Page* target_page = Page::FromHeapObject(target);
2344 : Page* source_page = Page::FromHeapObject(host);
2345 13456542 : if (target_page->IsEvacuationCandidate() &&
2346 263102 : (rinfo->host().is_null() ||
2347 : !source_page->ShouldSkipEvacuationSlotRecording())) {
2348 : RelocInfo::Mode rmode = rinfo->rmode();
2349 : Address addr = rinfo->pc();
2350 : SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
2351 233923 : if (rinfo->IsInConstantPool()) {
2352 : addr = rinfo->constant_pool_entry_address();
2353 : if (RelocInfo::IsCodeTargetMode(rmode)) {
2354 : slot_type = CODE_ENTRY_SLOT;
2355 : } else {
2356 : DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2357 : slot_type = OBJECT_SLOT;
2358 : }
2359 : }
2360 467814 : uintptr_t offset = addr - source_page->address();
2361 : DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
2362 233907 : result.should_record = true;
2363 233907 : result.memory_chunk = source_page;
2364 233907 : result.slot_type = slot_type;
2365 233907 : result.offset = static_cast<uint32_t>(offset);
2366 : }
2367 6728255 : return result;
2368 : }
2369 :
2370 2545978 : void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
2371 : HeapObject target) {
2372 2545978 : RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
2373 2545978 : if (info.should_record) {
2374 : RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
2375 110638 : info.offset);
2376 : }
2377 2545978 : }
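// A typed old-to-old slot is recorded only if the target lives on an
// evacuation candidate page and either the relocation has no host or the host
// page does not skip evacuation slot recording; constant-pool entries are
// recorded against the pool address with CODE_ENTRY_SLOT or OBJECT_SLOT type.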
2378 :
2379 : namespace {
2380 :
2381 : // The missing specialization MakeSlotValue<FullObjectSlot, WEAK>() turns any
2382 : // attempt to store a weak reference into a strong-only slot into a compile error.
2383 : template <typename TSlot, HeapObjectReferenceType reference_type>
2384 : typename TSlot::TObject MakeSlotValue(HeapObject heap_object);
2385 :
2386 : template <>
2387 : Object MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
2388 : HeapObject heap_object) {
2389 : return heap_object;
2390 : }
2391 :
2392 : template <>
2393 : MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
2394 : HeapObject heap_object) {
2395 : return HeapObjectReference::Strong(heap_object);
2396 : }
2397 :
2398 : template <>
2399 : MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
2400 : HeapObject heap_object) {
2401 : return HeapObjectReference::Weak(heap_object);
2402 : }
2403 :
2404 : #ifdef V8_COMPRESS_POINTERS
2405 : template <>
2406 : Object MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
2407 : HeapObject heap_object) {
2408 : return heap_object;
2409 : }
2410 :
2411 : template <>
2412 : MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
2413 : HeapObject heap_object) {
2414 : return HeapObjectReference::Strong(heap_object);
2415 : }
2416 :
2417 : // The following specialization
2418 : // MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
2419 : // is not used.
2420 : #endif
2421 :
2422 : template <AccessMode access_mode, HeapObjectReferenceType reference_type,
2423 : typename TSlot>
2424 478413914 : static inline SlotCallbackResult UpdateSlot(TSlot slot,
2425 : typename TSlot::TObject old,
2426 : HeapObject heap_obj) {
2427 : static_assert(
2428 : std::is_same<TSlot, FullObjectSlot>::value ||
2429 : std::is_same<TSlot, ObjectSlot>::value ||
2430 : std::is_same<TSlot, FullMaybeObjectSlot>::value ||
2431 : std::is_same<TSlot, MaybeObjectSlot>::value,
2432 : "Only [Full]ObjectSlot and [Full]MaybeObjectSlot are expected here");
2433 : MapWord map_word = heap_obj->map_word();
2434 478413914 : if (map_word.IsForwardingAddress()) {
2435 : DCHECK(Heap::InFromSpace(heap_obj) ||
2436 : MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2437 : Page::FromHeapObject(heap_obj)->IsFlagSet(
2438 : Page::COMPACTION_WAS_ABORTED));
2439 : typename TSlot::TObject target =
2440 : MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
2441 : if (access_mode == AccessMode::NON_ATOMIC) {
2442 : slot.store(target);
2443 : } else {
2444 : slot.Release_CompareAndSwap(old, target);
2445 : }
2446 : DCHECK(!Heap::InFromSpace(target));
2447 : DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
2448 : } else {
2449 : DCHECK(heap_obj->map()->IsMap());
2450 : }
2451 : // OLD_TO_OLD slots are always removed after updating.
2452 478413914 : return REMOVE_SLOT;
2453 : }
2454 :
2455 : template <AccessMode access_mode, typename TSlot>
2456 114302400 : static inline SlotCallbackResult UpdateSlot(TSlot slot) {
2457 114302400 : typename TSlot::TObject obj = slot.Relaxed_Load();
2458 114302400 : HeapObject heap_obj;
2459 114302400 : if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
2460 4692483 : UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(slot, obj, heap_obj);
2461 109699757 : } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
2462 : return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
2463 77337031 : heap_obj);
2464 : }
2465 : return REMOVE_SLOT;
2466 : }
2467 :
2468 : template <AccessMode access_mode, typename TSlot>
2469 431447017 : static inline SlotCallbackResult UpdateStrongSlot(TSlot slot) {
2470 : DCHECK(!HasWeakHeapObjectTag((*slot).ptr()));
2471 431447017 : typename TSlot::TObject obj = slot.Relaxed_Load();
2472 431447017 : HeapObject heap_obj;
2473 431447017 : if (obj.GetHeapObject(&heap_obj)) {
2474 : return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
2475 390253606 : heap_obj);
2476 : }
2477 : return REMOVE_SLOT;
2478 : }
2479 :
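// The updating pattern shared by the helpers above: load the slot, and if the
// referenced object's map word is a forwarding address (installed during
// evacuation), rewrite the slot to the new location while preserving the
// strong/weak tag. Roughly:
//
//   HeapObject obj;
//   if ((*slot)->GetHeapObject(&obj)) {
//     MapWord mw = obj->map_word();
//     if (mw.IsForwardingAddress()) slot.store(mw.ToForwardingAddress());
//   }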
2480 : } // namespace
2481 :
2482 : // Visitor for updating root pointers and to-space pointers.
2483 : // It does not expect to encounter pointers to dead objects.
2484 168693 : class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
2485 : public:
2486 169148 : PointersUpdatingVisitor() {}
2487 :
2488 33541 : void VisitPointer(HeapObject host, ObjectSlot p) override {
2489 : UpdateStrongSlotInternal(p);
2490 33541 : }
2491 :
2492 15 : void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
2493 : UpdateSlotInternal(p);
2494 15 : }
2495 :
2496 14931383 : void VisitPointers(HeapObject host, ObjectSlot start,
2497 : ObjectSlot end) override {
2498 167625301 : for (ObjectSlot p = start; p < end; ++p) {
2499 : UpdateStrongSlotInternal(p);
2500 : }
2501 14871514 : }
2502 :
2503 0 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
2504 : MaybeObjectSlot end) final {
2505 72945174 : for (MaybeObjectSlot p = start; p < end; ++p) {
2506 : UpdateSlotInternal(p);
2507 : }
2508 0 : }
2509 :
2510 256662223 : void VisitRootPointer(Root root, const char* description,
2511 : FullObjectSlot p) override {
2512 : UpdateRootSlotInternal(p);
2513 256662224 : }
2514 :
2515 1917881 : void VisitRootPointers(Root root, const char* description,
2516 : FullObjectSlot start, FullObjectSlot end) override {
2517 40265064 : for (FullObjectSlot p = start; p < end; ++p) {
2518 : UpdateRootSlotInternal(p);
2519 : }
2520 1917881 : }
2521 :
2522 0 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
2523 :     // This visitor never visits code objects.
2524 0 : UNREACHABLE();
2525 : }
2526 :
2527 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
2528 :     // This visitor never visits code objects.
2529 0 : UNREACHABLE();
2530 : }
2531 :
2532 : private:
2533 : static inline SlotCallbackResult UpdateRootSlotInternal(FullObjectSlot slot) {
2534 293091525 : return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
2535 : }
2536 :
2537 : static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
2538 : MaybeObjectSlot slot) {
2539 : return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
2540 : }
2541 :
2542 : static inline SlotCallbackResult UpdateStrongSlotInternal(ObjectSlot slot) {
2543 137855945 : return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
2544 : }
2545 :
2546 : static inline SlotCallbackResult UpdateSlotInternal(MaybeObjectSlot slot) {
2547 69291666 : return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
2548 : }
2549 : };
2550 :
2551 119486 : static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
2552 : FullObjectSlot p) {
2553 : MapWord map_word = HeapObject::cast(*p)->map_word();
2554 :
2555 119486 : if (map_word.IsForwardingAddress()) {
2556 502 : String new_string = String::cast(map_word.ToForwardingAddress());
2557 :
2558 502 : if (new_string->IsExternalString()) {
2559 : MemoryChunk::MoveExternalBackingStoreBytes(
2560 : ExternalBackingStoreType::kExternalString,
2561 : Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
2562 1004 : ExternalString::cast(new_string)->ExternalPayloadSize());
2563 : }
2564 502 : return new_string;
2565 : }
2566 :
2567 : return String::cast(*p);
2568 : }
2569 :
2570 83492 : void MarkCompactCollector::EvacuatePrologue() {
2571 : // New space.
2572 166984 : NewSpace* new_space = heap()->new_space();
2573 : // Append the list of new space pages to be processed.
2574 176628 : for (Page* p :
2575 93136 : PageRange(new_space->first_allocatable_address(), new_space->top())) {
2576 93136 : new_space_evacuation_pages_.push_back(p);
2577 : }
2578 83492 : new_space->Flip();
2579 83492 : new_space->ResetLinearAllocationArea();
2580 :
2581 83492 : heap()->new_lo_space()->Flip();
2582 :
2583 : // Old space.
2584 : DCHECK(old_space_evacuation_pages_.empty());
2585 83492 : old_space_evacuation_pages_ = std::move(evacuation_candidates_);
2586 : evacuation_candidates_.clear();
2587 : DCHECK(evacuation_candidates_.empty());
2588 83492 : }
2589 :
2590 83492 : void MarkCompactCollector::EvacuateEpilogue() {
2591 : aborted_evacuation_candidates_.clear();
2592 : // New space.
2593 417460 : heap()->new_space()->set_age_mark(heap()->new_space()->top());
2594 : // Deallocate unmarked large objects.
2595 83492 : heap()->lo_space()->FreeUnmarkedObjects();
2596 83492 : heap()->code_lo_space()->FreeUnmarkedObjects();
2597 83492 : heap()->new_lo_space()->FreeUnmarkedObjects();
2598 : // Old space. Deallocate evacuated candidate pages.
2599 83492 : ReleaseEvacuationCandidates();
2600 : // Give pages that are queued to be freed back to the OS.
2601 83492 : heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2602 : #ifdef DEBUG
2603 : // Old-to-old slot sets must be empty after evacuation.
2604 : for (Page* p : *heap()->old_space()) {
2605 : DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2606 : DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2607 : DCHECK_NULL(p->invalidated_slots());
2608 : }
2609 : #endif
2610 83492 : }
2611 :
2612 : class Evacuator : public Malloced {
2613 : public:
2614 : enum EvacuationMode {
2615 : kObjectsNewToOld,
2616 : kPageNewToOld,
2617 : kObjectsOldToOld,
2618 : kPageNewToNew,
2619 : };
2620 :
2621 : static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
2622 : // Note: The order of checks is important in this function.
2623 190653 : if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
2624 : return kPageNewToOld;
2625 189568 : if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
2626 : return kPageNewToNew;
2627 186639 : if (chunk->InNewSpace()) return kObjectsNewToOld;
2628 : return kObjectsOldToOld;
2629 : }
2630 :
2631 : // NewSpacePages with more live bytes than this threshold qualify for fast
2632 : // evacuation.
2633 63580 : static intptr_t NewSpacePageEvacuationThreshold() {
2634 63580 : if (FLAG_page_promotion)
2635 127100 : return FLAG_page_promotion_threshold *
2636 127100 : MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
2637 30 : return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
2638 : }
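// Worked example, assuming the default FLAG_page_promotion_threshold of 70:
// a new-space page crosses the promotion threshold once more than
// 70 * AllocatableMemoryInDataPage() / 100 bytes on it are live. With
// FLAG_page_promotion disabled the threshold exceeds the allocatable area by
// one tagged word, so no page can ever qualify.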
2639 :
2640 85550 : Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
2641 : : heap_(heap),
2642 : local_allocator_(heap_),
2643 : local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
2644 : new_space_visitor_(heap_, &local_allocator_, record_visitor,
2645 : &local_pretenuring_feedback_),
2646 : new_to_new_page_visitor_(heap_, record_visitor,
2647 : &local_pretenuring_feedback_),
2648 : new_to_old_page_visitor_(heap_, record_visitor,
2649 : &local_pretenuring_feedback_),
2650 :
2651 : old_space_visitor_(heap_, &local_allocator_, record_visitor),
2652 : duration_(0.0),
2653 256650 : bytes_compacted_(0) {}
2654 :
2655 256650 : virtual ~Evacuator() = default;
2656 :
2657 : void EvacuatePage(MemoryChunk* chunk);
2658 :
2659 808 : void AddObserver(MigrationObserver* observer) {
2660 : new_space_visitor_.AddObserver(observer);
2661 : old_space_visitor_.AddObserver(observer);
2662 808 : }
2663 :
2664 : // Merge back locally cached info sequentially. Note that this method needs
2665 : // to be called from the main thread.
2666 : inline void Finalize();
2667 :
2668 : virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
2669 :
2670 : protected:
2671 : static const int kInitialLocalPretenuringFeedbackCapacity = 256;
2672 :
2673 :   // |saved_live_bytes| is set to the live bytes of the page that was processed.
2674 : virtual void RawEvacuatePage(MemoryChunk* chunk,
2675 : intptr_t* saved_live_bytes) = 0;
2676 :
2677 : inline Heap* heap() { return heap_; }
2678 :
2679 : void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
2680 87245 : duration_ += duration;
2681 87245 : bytes_compacted_ += bytes_compacted;
2682 : }
2683 :
2684 : Heap* heap_;
2685 :
2686 : // Locally cached collector data.
2687 : LocalAllocator local_allocator_;
2688 : Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
2689 :
2690 : // Visitors for the corresponding spaces.
2691 : EvacuateNewSpaceVisitor new_space_visitor_;
2692 : EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
2693 : new_to_new_page_visitor_;
2694 : EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
2695 : new_to_old_page_visitor_;
2696 : EvacuateOldSpaceVisitor old_space_visitor_;
2697 :
2698 :   // Bookkeeping info.
2699 : double duration_;
2700 : intptr_t bytes_compacted_;
2701 : };
2702 :
2703 174355 : void Evacuator::EvacuatePage(MemoryChunk* chunk) {
2704 174355 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
2705 : DCHECK(chunk->SweepingDone());
2706 87178 : intptr_t saved_live_bytes = 0;
2707 87178 : double evacuation_time = 0.0;
2708 : {
2709 : AlwaysAllocateScope always_allocate(heap()->isolate());
2710 : TimedScope timed_scope(&evacuation_time);
2711 87167 : RawEvacuatePage(chunk, &saved_live_bytes);
2712 : }
2713 87245 : ReportCompactionProgress(evacuation_time, saved_live_bytes);
2714 87245 : if (FLAG_trace_evacuation) {
2715 : PrintIsolate(heap()->isolate(),
2716 : "evacuation[%p]: page=%p new_space=%d "
2717 : "page_evacuation=%d executable=%d contains_age_mark=%d "
2718 : "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
2719 : static_cast<void*>(this), static_cast<void*>(chunk),
2720 : chunk->InNewSpace(),
2721 0 : chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
2722 : chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
2723 : chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
2724 0 : chunk->Contains(heap()->new_space()->age_mark()),
2725 : saved_live_bytes, evacuation_time,
2726 0 : chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2727 87245 : }
2728 87244 : }
2729 :
2730 513300 : void Evacuator::Finalize() {
2731 85550 : local_allocator_.Finalize();
2732 171100 : heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
2733 342200 : heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
2734 256650 : new_to_old_page_visitor_.moved_bytes());
2735 : heap()->IncrementSemiSpaceCopiedObjectSize(
2736 85550 : new_space_visitor_.semispace_copied_size() +
2737 256650 : new_to_new_page_visitor_.moved_bytes());
2738 : heap()->IncrementYoungSurvivorsCounter(
2739 85550 : new_space_visitor_.promoted_size() +
2740 85550 : new_space_visitor_.semispace_copied_size() +
2741 85550 : new_to_old_page_visitor_.moved_bytes() +
2742 85550 : new_to_new_page_visitor_.moved_bytes());
2743 171100 : heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
2744 85550 : }
2745 :
2746 171100 : class FullEvacuator : public Evacuator {
2747 : public:
2748 : FullEvacuator(MarkCompactCollector* collector,
2749 : RecordMigratedSlotVisitor* record_visitor)
2750 85550 : : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
2751 :
2752 84575 : GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
2753 84575 : return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
2754 : }
2755 :
2756 : protected:
2757 : void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
2758 :
2759 : MarkCompactCollector* collector_;
2760 : };
2761 :
2762 87135 : void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
2763 87135 : const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
2764 174273 : TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2765 : "FullEvacuator::RawEvacuatePage", "evacuation_mode",
2766 : evacuation_mode);
2767 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
2768 87138 : collector_->non_atomic_marking_state();
2769 87138 : *live_bytes = marking_state->live_bytes(chunk);
2770 87138 : HeapObject failed_object;
2771 87138 : switch (evacuation_mode) {
2772 : case kObjectsNewToOld:
2773 : LiveObjectVisitor::VisitBlackObjectsNoFail(
2774 : chunk, marking_state, &new_space_visitor_,
2775 74805 : LiveObjectVisitor::kClearMarkbits);
2776 : // ArrayBufferTracker will be updated during pointers updating.
2777 74816 : break;
2778 : case kPageNewToOld:
2779 : LiveObjectVisitor::VisitBlackObjectsNoFail(
2780 : chunk, marking_state, &new_to_old_page_visitor_,
2781 581 : LiveObjectVisitor::kKeepMarking);
2782 : new_to_old_page_visitor_.account_moved_bytes(
2783 : marking_state->live_bytes(chunk));
2784 : // ArrayBufferTracker will be updated during sweeping.
2785 : break;
2786 : case kPageNewToNew:
2787 : LiveObjectVisitor::VisitBlackObjectsNoFail(
2788 : chunk, marking_state, &new_to_new_page_visitor_,
2789 1463 : LiveObjectVisitor::kKeepMarking);
2790 : new_to_new_page_visitor_.account_moved_bytes(
2791 : marking_state->live_bytes(chunk));
2792 : // ArrayBufferTracker will be updated during sweeping.
2793 : break;
2794 : case kObjectsOldToOld: {
2795 : const bool success = LiveObjectVisitor::VisitBlackObjects(
2796 : chunk, marking_state, &old_space_visitor_,
2797 10334 : LiveObjectVisitor::kClearMarkbits, &failed_object);
2798 10382 : if (!success) {
2799 : // Aborted compaction page. Actual processing happens on the main
2800 : // thread for simplicity reasons.
2801 45 : collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
2802 : } else {
2803 : // ArrayBufferTracker will be updated during pointers updating.
2804 : }
2805 : break;
2806 : }
2807 87200 : }
2808 87245 : }
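// The four modes above differ in what happens to the page's objects:
// kObjectsNewToOld and kObjectsOldToOld copy individual live objects to a new
// location and clear mark bits, whereas kPageNewToOld and kPageNewToNew leave
// the objects in place because the whole page has already been moved between
// spaces; those modes only revisit objects to record migrated slots or update
// pretenuring feedback, and keep mark bits for later sweeping.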
2809 :
2810 : class EvacuationItem : public ItemParallelJob::Item {
2811 : public:
2812 87245 : explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
2813 174490 : ~EvacuationItem() override = default;
2814 : MemoryChunk* chunk() const { return chunk_; }
2815 :
2816 : private:
2817 : MemoryChunk* chunk_;
2818 : };
2819 :
2820 85545 : class PageEvacuationTask : public ItemParallelJob::Task {
2821 : public:
2822 : PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
2823 : : ItemParallelJob::Task(isolate),
2824 : evacuator_(evacuator),
2825 85550 : tracer_(isolate->heap()->tracer()) {}
2826 :
2827 84604 : void RunInParallel() override {
2828 338425 : TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
2829 87225 : EvacuationItem* item = nullptr;
2830 171834 : while ((item = GetItem<EvacuationItem>()) != nullptr) {
2831 87225 : evacuator_->EvacuatePage(item->chunk());
2832 87244 : item->MarkFinished();
2833 84642 : }
2834 84644 : };
2835 :
2836 : private:
2837 : Evacuator* evacuator_;
2838 : GCTracer* tracer_;
2839 : };
2840 :
2841 : template <class Evacuator, class Collector>
2842 70969 : void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
2843 : Collector* collector, ItemParallelJob* job,
2844 : RecordMigratedSlotVisitor* record_visitor,
2845 156519 : MigrationObserver* migration_observer, const intptr_t live_bytes) {
2846 : // Used for trace summary.
2847 : double compaction_speed = 0;
2848 70969 : if (FLAG_trace_evacuation) {
2849 0 : compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
2850 : }
2851 :
2852 70969 : const bool profiling = isolate()->LogObjectRelocation();
2853 : ProfilingMigrationObserver profiling_observer(heap());
2854 :
2855 : const int wanted_num_tasks =
2856 70969 : NumberOfParallelCompactionTasks(job->NumberOfItems());
2857 70969 : Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
2858 156519 : for (int i = 0; i < wanted_num_tasks; i++) {
2859 85550 : evacuators[i] = new Evacuator(collector, record_visitor);
2860 85550 : if (profiling) evacuators[i]->AddObserver(&profiling_observer);
2861 85550 : if (migration_observer != nullptr)
2862 0 : evacuators[i]->AddObserver(migration_observer);
2863 171100 : job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
2864 : }
2865 70969 : job->Run(isolate()->async_counters());
2866 156519 : for (int i = 0; i < wanted_num_tasks; i++) {
2867 85550 : evacuators[i]->Finalize();
2868 85550 : delete evacuators[i];
2869 : }
2870 70969 : delete[] evacuators;
2871 :
2872 70969 : if (FLAG_trace_evacuation) {
2873 0 : PrintIsolate(isolate(),
2874 : "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
2875 : "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
2876 : " compaction_speed=%.f\n",
2877 : isolate()->time_millis_since_init(),
2878 : FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
2879 : wanted_num_tasks, job->NumberOfTasks(),
2880 0 : V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
2881 0 : live_bytes, compaction_speed);
2882 : }
2883 70969 : }
2884 :
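          : // A new-space page is moved in its entirety (page promotion) rather than
          : // evacuated object by object only if all of the conditions below hold: the
          : // heap is not trying to reduce memory, the page may be evacuated at all,
          : // its live bytes exceed the new-space page evacuation threshold, it does
          : // not contain the age mark, and the old generation can still expand by
          : // that many bytes.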
2885 78915 : bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
2886 76863 : const bool reduce_memory = heap()->ShouldReduceMemory();
2887 76863 : const Address age_mark = heap()->new_space()->age_mark();
2888 127160 : return !reduce_memory && !p->NeverEvacuate() &&
2889 66433 : (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
2890 80967 : !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
2891 : }
2892 :
2893 83492 : void MarkCompactCollector::EvacuatePagesInParallel() {
2894 : ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
2895 251058 : &page_parallel_job_semaphore_);
2896 : intptr_t live_bytes = 0;
2897 :
2898 177366 : for (Page* page : old_space_evacuation_pages_) {
2899 10382 : live_bytes += non_atomic_marking_state()->live_bytes(page);
2900 10382 : evacuation_job.AddItem(new EvacuationItem(page));
2901 : }
2902 :
2903 260702 : for (Page* page : new_space_evacuation_pages_) {
2904 : intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
2905 93136 : if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
2906 76863 : live_bytes += live_bytes_on_page;
2907 76863 : if (ShouldMovePage(page, live_bytes_on_page)) {
2908 2047 : if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
2909 582 : EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
2910 : DCHECK_EQ(heap()->old_space(), page->owner());
2911 : // The move added page->allocated_bytes to the old space, but we are
2912 : // going to sweep the page and add page->live_byte_count.
2913 582 : heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
2914 : page);
2915 : } else {
2916 1465 : EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
2917 : }
2918 : }
2919 76863 : evacuation_job.AddItem(new EvacuationItem(page));
2920 : }
2921 :
2922 : // Promote young generation large objects.
2923 83492 : LargePage* current = heap()->new_lo_space()->first_page();
2924 : IncrementalMarking::NonAtomicMarkingState* marking_state =
2925 : heap()->incremental_marking()->non_atomic_marking_state();
2926 166984 : while (current) {
2927 : LargePage* next_current = current->next_page();
2928 : HeapObject object = current->GetObject();
2929 : DCHECK(!marking_state->IsGrey(object));
2930 0 : if (marking_state->IsBlack(object)) {
2931 0 : heap_->lo_space()->PromoteNewLargeObject(current);
2932 : current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
2933 0 : evacuation_job.AddItem(new EvacuationItem(current));
2934 : }
2935 : current = next_current;
2936 : }
2937 :
2938 96015 : if (evacuation_job.NumberOfItems() == 0) return;
2939 :
2940 : RecordMigratedSlotVisitor record_visitor(this);
2941 : CreateAndExecuteEvacuationTasks<FullEvacuator>(
2942 70969 : this, &evacuation_job, &record_visitor, nullptr, live_bytes);
2943 141938 : PostProcessEvacuationCandidates();
2944 : }
2945 :
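          : // Retainer used on weak lists after evacuation: entries whose objects have
          : // a forwarding address are rewritten to it; all other entries are returned
          : // unchanged.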
2946 83492 : class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2947 : public:
2948 166984 : Object RetainAs(Object object) override {
2949 166984 : if (object->IsHeapObject()) {
2950 : HeapObject heap_object = HeapObject::cast(object);
2951 : MapWord map_word = heap_object->map_word();
2952 166984 : if (map_word.IsForwardingAddress()) {
2953 3276 : return map_word.ToForwardingAddress();
2954 : }
2955 : }
2956 163708 : return object;
2957 : }
2958 : };
2959 :
2960 0 : void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
2961 0 : EvacuateRecordOnlyVisitor visitor(heap());
2962 : LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
2963 : &visitor,
2964 0 : LiveObjectVisitor::kKeepMarking);
2965 0 : }
2966 :
2967 : template <class Visitor, typename MarkingState>
2968 10307 : bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
2969 : MarkingState* marking_state,
2970 : Visitor* visitor,
2971 : IterationMode iteration_mode,
2972 : HeapObject* failed_object) {
2973 20616 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2974 : "LiveObjectVisitor::VisitBlackObjects");
2975 67694620 : for (auto object_and_size :
2976 : LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
2977 : HeapObject const object = object_and_size.first;
2978 33836973 : if (!visitor->Visit(object, object_and_size.second)) {
2979 45 : if (iteration_mode == kClearMarkbits) {
2980 45 : marking_state->bitmap(chunk)->ClearRange(
2981 : chunk->AddressToMarkbitIndex(chunk->area_start()),
2982 : chunk->AddressToMarkbitIndex(object->address()));
2983 45 : *failed_object = object;
2984 : }
2985 : return false;
2986 : }
2987 : }
2988 10337 : if (iteration_mode == kClearMarkbits) {
2989 10337 : marking_state->ClearLiveness(chunk);
2990 : }
2991 10382 : return true;
2992 : }
2993 :
2994 : template <class Visitor, typename MarkingState>
2995 76881 : void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
2996 : MarkingState* marking_state,
2997 : Visitor* visitor,
2998 : IterationMode iteration_mode) {
2999 153763 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3000 : "LiveObjectVisitor::VisitBlackObjectsNoFail");
3001 : DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
3002 76882 : if (chunk->owner()->identity() == LO_SPACE) {
3003 0 : HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
3004 : DCHECK(marking_state->IsBlack(object));
3005 0 : const bool success = visitor->Visit(object, object->Size());
3006 : USE(success);
3007 : DCHECK(success);
3008 : } else {
3009 88617196 : for (auto object_and_size :
3010 : LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
3011 42039706 : HeapObject const object = object_and_size.first;
3012 : DCHECK(marking_state->IsBlack(object));
3013 42039706 : const bool success = visitor->Visit(object, object_and_size.second);
3014 : USE(success);
3015 : DCHECK(success);
3016 : }
3017 : }
3018 76908 : if (iteration_mode == kClearMarkbits) {
3019 74816 : marking_state->ClearLiveness(chunk);
3020 76908 : }
3021 76908 : }
3022 :
3023 : template <class Visitor, typename MarkingState>
3024 0 : void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
3025 : MarkingState* marking_state,
3026 : Visitor* visitor,
3027 : IterationMode iteration_mode) {
3028 0 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3029 : "LiveObjectVisitor::VisitGreyObjectsNoFail");
3030 0 : for (auto object_and_size :
3031 : LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
3032 0 : HeapObject const object = object_and_size.first;
3033 : DCHECK(marking_state->IsGrey(object));
3034 0 : const bool success = visitor->Visit(object, object_and_size.second);
3035 : USE(success);
3036 : DCHECK(success);
3037 : }
3038 0 : if (iteration_mode == kClearMarkbits) {
3039 0 : marking_state->ClearLiveness(chunk);
3040 0 : }
3041 0 : }
3042 :
3043 : template <typename MarkingState>
3044 45 : void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
3045 : MarkingState* marking_state) {
3046 : int new_live_size = 0;
3047 5270 : for (auto object_and_size :
3048 : LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
3049 2590 : new_live_size += object_and_size.second;
3050 : }
3051 : marking_state->SetLiveBytes(chunk, new_live_size);
3052 45 : }
3053 :
3054 85584 : void MarkCompactCollector::Evacuate() {
3055 1001904 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3056 83492 : base::MutexGuard guard(heap()->relocation_mutex());
3057 :
3058 : {
3059 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
3060 166984 : EvacuatePrologue();
3061 : }
3062 :
3063 : {
3064 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
3065 : EvacuationScope evacuation_scope(this);
3066 166984 : EvacuatePagesInParallel();
3067 : }
3068 :
3069 83492 : UpdatePointersAfterEvacuation();
3070 :
3071 : {
3072 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
3073 83492 : if (!heap()->new_space()->Rebalance()) {
3074 0 : heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
3075 83492 : }
3076 : }
3077 :
3078 : // Give pages that are queued to be freed back to the OS. Note that filtering
3079 : // slots only handles old space (for unboxed doubles), and thus map space can
3080 : // still contain stale pointers. We only free the chunks after pointer updates
3081 : // to still have access to page headers.
3082 83492 : heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3083 :
3084 : {
3085 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3086 :
3087 260120 : for (Page* p : new_space_evacuation_pages_) {
3088 93136 : if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3089 : p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3090 1465 : sweeper()->AddPageForIterability(p);
3091 91671 : } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3092 : p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3093 : DCHECK_EQ(OLD_SPACE, p->owner()->identity());
3094 582 : sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
3095 : }
3096 : }
3097 : new_space_evacuation_pages_.clear();
3098 :
3099 177366 : for (Page* p : old_space_evacuation_pages_) {
3100 : // Important: the skip list should be cleared only after roots were updated,
3101 : // because root iteration traverses the stack and might have to find code
3102 : // objects from a not-yet-updated pc pointing into an evacuation candidate.
3103 10382 : SkipList* list = p->skip_list();
3104 10382 : if (list != nullptr) list->Clear();
3105 10382 : if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3106 90 : sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
3107 : p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3108 : }
3109 83492 : }
3110 : }
3111 :
3112 : {
3113 333968 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
3114 166984 : EvacuateEpilogue();
3115 83492 : }
3116 :
3117 : #ifdef VERIFY_HEAP
3118 : if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
3119 : FullEvacuationVerifier verifier(heap());
3120 : verifier.Run();
3121 : }
3122 : #endif
3123 83492 : }
3124 :
3125 526214 : class UpdatingItem : public ItemParallelJob::Item {
3126 : public:
3127 526214 : ~UpdatingItem() override = default;
3128 : virtual void Process() = 0;
3129 : };
3130 :
3131 350331 : class PointersUpdatingTask : public ItemParallelJob::Task {
3132 : public:
3133 : explicit PointersUpdatingTask(Isolate* isolate,
3134 : GCTracer::BackgroundScope::ScopeId scope)
3135 : : ItemParallelJob::Task(isolate),
3136 350855 : tracer_(isolate->heap()->tracer()),
3137 701710 : scope_(scope) {}
3138 :
3139 312836 : void RunInParallel() override {
3140 1250900 : TRACE_BACKGROUND_GC(tracer_, scope_);
3141 : UpdatingItem* item = nullptr;
3142 838640 : while ((item = GetItem<UpdatingItem>()) != nullptr) {
3143 525860 : item->Process();
3144 525644 : item->MarkFinished();
3145 312447 : }
3146 313102 : };
3147 :
3148 : private:
3149 : GCTracer* tracer_;
3150 : GCTracer::BackgroundScope::ScopeId scope_;
3151 : };
3152 :
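          : // Updates pointers stored in objects on a to-space page. Regular pages are
          : // iterated linearly object by object; pages promoted new->new still contain
          : // garbage and are therefore iterated via the marking bitmap.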
3153 : template <typename MarkingState>
3154 : class ToSpaceUpdatingItem : public UpdatingItem {
3155 : public:
3156 : explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
3157 : MarkingState* marking_state)
3158 : : chunk_(chunk),
3159 : start_(start),
3160 : end_(end),
3161 85202 : marking_state_(marking_state) {}
3162 170404 : ~ToSpaceUpdatingItem() override = default;
3163 :
3164 85200 : void Process() override {
3165 170400 : if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3166 : // New->new promoted pages contain garbage so they require iteration using
3167 : // markbits.
3168 1463 : ProcessVisitLive();
3169 : } else {
3170 83737 : ProcessVisitAll();
3171 : }
3172 85201 : }
3173 :
3174 : private:
3175 84194 : void ProcessVisitAll() {
3176 168388 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3177 : "ToSpaceUpdatingItem::ProcessVisitAll");
3178 : PointersUpdatingVisitor visitor;
3179 25073070 : for (Address cur = start_; cur < end_;) {
3180 24905139 : HeapObject object = HeapObject::FromAddress(cur);
3181 : Map map = object->map();
3182 24905139 : int size = object->SizeFromMap(map);
3183 : object->IterateBodyFast(map, size, &visitor);
3184 24904682 : cur += size;
3185 83737 : }
3186 83737 : }
3187 :
3188 1462 : void ProcessVisitLive() {
3189 2924 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3190 : "ToSpaceUpdatingItem::ProcessVisitLive");
3191 : // For young generation evacuations we want to visit grey objects, for
3192 : // full MC, we need to visit black objects.
3193 : PointersUpdatingVisitor visitor;
3194 14027851 : for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
3195 : chunk_, marking_state_->bitmap(chunk_))) {
3196 4674487 : object_and_size.first->IterateBodyFast(&visitor);
3197 1464 : }
3198 1464 : }
3199 :
3200 : MemoryChunk* chunk_;
3201 : Address start_;
3202 : Address end_;
3203 : MarkingState* marking_state_;
3204 : };
3205 :
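          : // Updates the recorded slots of a single chunk: OLD_TO_NEW slots are always
          : // processed, while OLD_TO_OLD slots and invalidated slots are only touched
          : // when the updating mode is ALL.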
3206 : template <typename MarkingState>
3207 : class RememberedSetUpdatingItem : public UpdatingItem {
3208 : public:
3209 : explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
3210 : MemoryChunk* chunk,
3211 : RememberedSetUpdatingMode updating_mode)
3212 : : heap_(heap),
3213 : marking_state_(marking_state),
3214 : chunk_(chunk),
3215 348684 : updating_mode_(updating_mode) {}
3216 697368 : ~RememberedSetUpdatingItem() override = default;
3217 :
3218 347987 : void Process() override {
3219 695975 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3220 : "RememberedSetUpdatingItem::Process");
3221 347988 : base::MutexGuard guard(chunk_->mutex());
3222 348257 : CodePageMemoryModificationScope memory_modification_scope(chunk_);
3223 348150 : UpdateUntypedPointers();
3224 696907 : UpdateTypedPointers();
3225 348376 : }
3226 :
3227 : private:
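          : // Rewrites a single OLD_TO_NEW slot to the target's forwarding address if
          : // the target was evacuated. Returns KEEP_SLOT when the (possibly updated)
          : // target is still in to space (and, on new->new promoted pages, still
          : // marked live); otherwise the slot can be removed from the remembered set.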
3228 : template <typename TSlot>
3229 43987185 : inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
3230 : static_assert(
3231 : std::is_same<TSlot, FullMaybeObjectSlot>::value ||
3232 : std::is_same<TSlot, MaybeObjectSlot>::value,
3233 : "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
3234 : using THeapObjectSlot = typename TSlot::THeapObjectSlot;
3235 43987185 : HeapObject heap_object;
3236 43987185 : if (!(*slot).GetHeapObject(&heap_object)) {
3237 : return REMOVE_SLOT;
3238 : }
3239 44026809 : if (Heap::InFromSpace(heap_object)) {
3240 : MapWord map_word = heap_object->map_word();
3241 40482644 : if (map_word.IsForwardingAddress()) {
3242 : HeapObjectReference::Update(THeapObjectSlot(slot),
3243 : map_word.ToForwardingAddress());
3244 : }
3245 40467875 : bool success = (*slot).GetHeapObject(&heap_object);
3246 : USE(success);
3247 : DCHECK(success);
3248 : // If the object was in from space before and, after executing the
3249 : // callback, is in to space, the object is still live.
3250 : // Unfortunately, we do not know about the slot. It could be in a
3251 : // just-freed free space object.
3252 40428008 : if (Heap::InToSpace(heap_object)) {
3253 : return KEEP_SLOT;
3254 : }
3255 3544165 : } else if (Heap::InToSpace(heap_object)) {
3256 : // Slots can point to "to" space if the page has been moved, or if the
3257 : // slot has been recorded multiple times in the remembered set, or
3258 : // if the slot was already updated during old->old updating.
3259 : // In case the page has been moved, check markbits to determine liveness
3260 : // of the slot. In the other case, the slot can just be kept.
3261 1690100 : if (Page::FromHeapObject(heap_object)
3262 : ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3263 : // IsBlackOrGrey is required because objects are marked as grey for
3264 : // the young generation collector while they are black for the full
3265 : // MC.
3266 1688378 : if (marking_state_->IsBlackOrGrey(heap_object)) {
3267 : return KEEP_SLOT;
3268 : } else {
3269 1399 : return REMOVE_SLOT;
3270 : }
3271 : }
3272 : return KEEP_SLOT;
3273 : } else {
3274 : DCHECK(!Heap::InNewSpace(heap_object));
3275 : }
3276 : return REMOVE_SLOT;
3277 : }
3278 :
3279 348070 : void UpdateUntypedPointers() {
3280 1044876 : if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
3281 338060 : RememberedSet<OLD_TO_NEW>::Iterate(
3282 : chunk_,
3283 : [this](MaybeObjectSlot slot) {
3284 43938374 : return CheckAndUpdateOldToNewSlot(slot);
3285 43938374 : },
3286 : SlotSet::PREFREE_EMPTY_BUCKETS);
3287 : }
3288 696880 : if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3289 348480 : (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
3290 17868 : InvalidatedSlotsFilter filter(chunk_);
3291 35668 : RememberedSet<OLD_TO_OLD>::Iterate(
3292 : chunk_,
3293 45449282 : [&filter](MaybeObjectSlot slot) {
3294 45449282 : if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
3295 45137296 : return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
3296 : },
3297 17834 : SlotSet::PREFREE_EMPTY_BUCKETS);
3298 : }
3299 696735 : if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3300 348326 : chunk_->invalidated_slots() != nullptr) {
3301 : #ifdef DEBUG
3302 : for (auto object_size : *chunk_->invalidated_slots()) {
3303 : HeapObject object = object_size.first;
3304 : int size = object_size.second;
3305 : DCHECK_LE(object->SizeFromMap(object->map()), size);
3306 : }
3307 : #endif
3308 : // The invalidated slots are not needed after old-to-old slots were
3309 : // processsed.
3310 120 : chunk_->ReleaseInvalidatedSlots();
3311 : }
3312 348409 : }
3313 :
3314 348257 : void UpdateTypedPointers() {
3315 696537 : if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
3316 : nullptr) {
3317 5483 : CHECK_NE(chunk_->owner(), heap_->map_space());
3318 : const auto check_and_update_old_to_new_slot_fn =
3319 : [this](FullMaybeObjectSlot slot) {
3320 49418 : return CheckAndUpdateOldToNewSlot(slot);
3321 49418 : };
3322 4402 : RememberedSet<OLD_TO_NEW>::IterateTyped(
3323 : chunk_, [=](SlotType slot_type, Address slot) {
3324 : return UpdateTypedSlotHelper::UpdateTypedSlot(
3325 49446 : heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
3326 51647 : });
3327 : }
3328 696620 : if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3329 348280 : (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
3330 : nullptr)) {
3331 3243 : CHECK_NE(chunk_->owner(), heap_->map_space());
3332 2162 : RememberedSet<OLD_TO_OLD>::IterateTyped(
3333 : chunk_, [=](SlotType slot_type, Address slot) {
3334 : // Using UpdateStrongSlot is OK here, because there are no weak
3335 : // typed slots.
3336 : return UpdateTypedSlotHelper::UpdateTypedSlot(
3337 : heap_, slot_type, slot,
3338 234182 : UpdateStrongSlot<AccessMode::NON_ATOMIC, FullMaybeObjectSlot>);
3339 235263 : });
3340 : }
3341 348340 : }
3342 :
3343 : Heap* heap_;
3344 : MarkingState* marking_state_;
3345 : MemoryChunk* chunk_;
3346 : RememberedSetUpdatingMode updating_mode_;
3347 : };
3348 :
3349 85202 : UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
3350 : MemoryChunk* chunk, Address start, Address end) {
3351 : return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
3352 170404 : chunk, start, end, non_atomic_marking_state());
3353 : }
3354 :
3355 348684 : UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
3356 : MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
3357 : return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
3358 697368 : heap(), non_atomic_marking_state(), chunk, updating_mode);
3359 : }
3360 :
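          : // Updates a [start_, end_) batch of new-space global handle nodes by
          : // iterating their roots with the pointer-updating visitor.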
3361 : class GlobalHandlesUpdatingItem : public UpdatingItem {
3362 : public:
3363 : GlobalHandlesUpdatingItem(GlobalHandles* global_handles, size_t start,
3364 : size_t end)
3365 0 : : global_handles_(global_handles), start_(start), end_(end) {}
3366 0 : ~GlobalHandlesUpdatingItem() override = default;
3367 :
3368 0 : void Process() override {
3369 0 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3370 : "GlobalHandlesUpdatingItem::Process");
3371 : PointersUpdatingVisitor updating_visitor;
3372 0 : global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
3373 0 : }
3374 :
3375 : private:
3376 : GlobalHandles* global_handles_;
3377 : size_t start_;
3378 : size_t end_;
3379 : };
3380 :
3381 : // Update array buffers on a page that has been evacuated by copying objects.
3382 : // Target page exclusivity in old space is guaranteed by the fact that
3383 : // evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
3384 : // free list items of a given page. For new space the tracker will update
3385 : // using a lock.
3386 : class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
3387 : public:
3388 : enum EvacuationState { kRegular, kAborted };
3389 :
3390 : explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
3391 92328 : : page_(page), state_(state) {}
3392 184656 : ~ArrayBufferTrackerUpdatingItem() override = default;
3393 :
3394 92305 : void Process() override {
3395 184611 : TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3396 : "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
3397 : state_);
3398 92306 : switch (state_) {
3399 : case EvacuationState::kRegular:
3400 : ArrayBufferTracker::ProcessBuffers(
3401 92303 : page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3402 92327 : break;
3403 : case EvacuationState::kAborted:
3404 : ArrayBufferTracker::ProcessBuffers(
3405 0 : page_, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3406 0 : break;
3407 92330 : }
3408 92325 : }
3409 :
3410 : private:
3411 : Page* const page_;
3412 : const EvacuationState state_;
3413 : };
3414 :
3415 83492 : int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
3416 83492 : ItemParallelJob* job) {
3417 : // Seed to space pages.
3418 83492 : const Address space_start = heap()->new_space()->first_allocatable_address();
3419 : const Address space_end = heap()->new_space()->top();
3420 : int pages = 0;
3421 168694 : for (Page* page : PageRange(space_start, space_end)) {
3422 : Address start =
3423 86912 : page->Contains(space_start) ? space_start : page->area_start();
3424 85202 : Address end = page->Contains(space_end) ? space_end : page->area_end();
3425 85202 : job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
3426 85202 : pages++;
3427 : }
3428 83492 : if (pages == 0) return 0;
3429 83492 : return NumberOfParallelToSpacePointerUpdateTasks(pages);
3430 : }
3431 :
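          : // Adds one remembered-set updating item per chunk of the given space that
          : // has OLD_TO_NEW/OLD_TO_OLD or invalidated-slot data to process, and
          : // returns the number of pages added so the caller can size the number of
          : // parallel tasks.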
3432 : template <typename IterateableSpace>
3433 417460 : int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
3434 : ItemParallelJob* job, IterateableSpace* space,
3435 : RememberedSetUpdatingMode mode) {
3436 : int pages = 0;
3437 1615766 : for (MemoryChunk* chunk : *space) {
3438 : const bool contains_old_to_old_slots =
3439 : chunk->slot_set<OLD_TO_OLD>() != nullptr ||
3440 1180422 : chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
3441 : const bool contains_old_to_new_slots =
3442 : chunk->slot_set<OLD_TO_NEW>() != nullptr ||
3443 859677 : chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
3444 : const bool contains_invalidated_slots =
3445 599153 : chunk->invalidated_slots() != nullptr;
3446 599153 : if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
3447 : !contains_invalidated_slots)
3448 : continue;
3449 348684 : if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
3450 : contains_invalidated_slots) {
3451 348684 : job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
3452 348684 : pages++;
3453 : }
3454 : }
3455 417460 : return pages;
3456 : }
3457 :
3458 83492 : int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
3459 : ItemParallelJob* job) {
3460 : int pages = 0;
3461 351209 : for (Page* p : new_space_evacuation_pages_) {
3462 93136 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
3463 91089 : if (p->local_tracker() == nullptr) continue;
3464 :
3465 90021 : pages++;
3466 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
3467 90021 : p, ArrayBufferTrackerUpdatingItem::kRegular));
3468 : }
3469 : }
3470 83492 : return pages;
3471 : }
3472 :
3473 83492 : int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
3474 : ItemParallelJob* job) {
3475 : int pages = 0;
3476 187703 : for (Page* p : old_space_evacuation_pages_) {
3477 20764 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
3478 : p->IsEvacuationCandidate()) {
3479 10337 : if (p->local_tracker() == nullptr) continue;
3480 :
3481 2307 : pages++;
3482 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
3483 2307 : p, ArrayBufferTrackerUpdatingItem::kRegular));
3484 : }
3485 : }
3486 167029 : for (auto object_and_page : aborted_evacuation_candidates_) {
3487 45 : Page* p = object_and_page.second;
3488 45 : if (p->local_tracker() == nullptr) continue;
3489 :
3490 0 : pages++;
3491 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
3492 0 : p, ArrayBufferTrackerUpdatingItem::kAborted));
3493 : }
3494 83492 : return pages;
3495 : }
3496 :
3497 83492 : void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3498 1252380 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3499 :
3500 : PointersUpdatingVisitor updating_visitor;
3501 :
3502 : {
3503 333968 : TRACE_GC(heap()->tracer(),
3504 : GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
3505 166984 : heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3506 : }
3507 :
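          : // First parallel pass: update remembered-set slots in old, code and large
          : // object spaces as well as pointers on to-space pages. Map space and the
          : // array buffer trackers are handled in the second pass below.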
3508 : {
3509 333968 : TRACE_GC(heap()->tracer(),
3510 : GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
3511 : ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3512 250476 : &page_parallel_job_semaphore_);
3513 :
3514 : int remembered_set_pages = 0;
3515 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3516 83492 : &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
3517 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3518 83492 : &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
3519 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3520 83492 : &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
3521 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3522 83492 : &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
3523 : const int remembered_set_tasks =
3524 : remembered_set_pages == 0
3525 : ? 0
3526 : : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
3527 83492 : old_to_new_slots_);
3528 83492 : const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
3529 : const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
3530 304855 : for (int i = 0; i < num_tasks; i++) {
3531 : updating_job.AddTask(new PointersUpdatingTask(
3532 : isolate(),
3533 442726 : GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3534 : }
3535 166984 : updating_job.Run(isolate()->async_counters());
3536 : }
3537 :
3538 : {
3539 : // - Update pointers in map space in a separate phase to avoid data races
3540 : // with Map->LayoutDescriptor edge.
3541 : // - Update array buffer trackers in the second phase to have access to
3542 : // byte length which is potentially a HeapNumber.
3543 333968 : TRACE_GC(heap()->tracer(),
3544 : GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
3545 : ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3546 166984 : &page_parallel_job_semaphore_);
3547 :
3548 : int array_buffer_pages = 0;
3549 83492 : array_buffer_pages += CollectNewSpaceArrayBufferTrackerItems(&updating_job);
3550 83492 : array_buffer_pages += CollectOldSpaceArrayBufferTrackerItems(&updating_job);
3551 :
3552 : int remembered_set_pages = 0;
3553 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3554 83492 : &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
3555 : const int remembered_set_tasks =
3556 : remembered_set_pages == 0
3557 : ? 0
3558 : : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
3559 83492 : old_to_new_slots_);
3560 : const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
3561 83492 : if (num_tasks > 0) {
3562 129492 : for (int i = 0; i < num_tasks; i++) {
3563 : updating_job.AddTask(new PointersUpdatingTask(
3564 : isolate(),
3565 258984 : GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3566 : }
3567 83492 : updating_job.Run(isolate()->async_counters());
3568 83492 : heap()->array_buffer_collector()->FreeAllocations();
3569 83492 : }
3570 : }
3571 :
3572 : {
3573 333968 : TRACE_GC(heap()->tracer(),
3574 : GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3575 : // Update pointers from external string table.
3576 : heap_->UpdateReferencesInExternalStringTable(
3577 83492 : &UpdateReferenceInExternalStringTableEntry);
3578 :
3579 83492 : EvacuationWeakObjectRetainer evacuation_object_retainer;
3580 166984 : heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3581 83492 : }
3582 83492 : }
3583 :
3584 45 : void MarkCompactCollector::ReportAbortedEvacuationCandidate(
3585 : HeapObject failed_object, MemoryChunk* chunk) {
3586 45 : base::MutexGuard guard(&mutex_);
3587 :
3588 : aborted_evacuation_candidates_.push_back(
3589 90 : std::make_pair(failed_object, static_cast<Page*>(chunk)));
3590 45 : }
3591 :
3592 70969 : void MarkCompactCollector::PostProcessEvacuationCandidates() {
3593 141983 : for (auto object_and_page : aborted_evacuation_candidates_) {
3594 : HeapObject failed_object = object_and_page.first;
3595 : Page* page = object_and_page.second;
3596 : page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3597 : // Aborted compaction page. We have to record slots here, since we
3598 : // might not have recorded them in first place.
3599 :
3600 : // Remove outdated slots.
3601 : RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
3602 : failed_object->address(),
3603 90 : SlotSet::PREFREE_EMPTY_BUCKETS);
3604 : RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
3605 45 : failed_object->address());
3606 : // Recompute live bytes.
3607 45 : LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
3608 : // Re-record slots.
3609 45 : EvacuateRecordOnlyVisitor record_visitor(heap());
3610 : LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
3611 : &record_visitor,
3612 45 : LiveObjectVisitor::kKeepMarking);
3613 : // Array buffers will be processed during pointer updating.
3614 : }
3615 : const int aborted_pages =
3616 141938 : static_cast<int>(aborted_evacuation_candidates_.size());
3617 : int aborted_pages_verified = 0;
3618 152320 : for (Page* p : old_space_evacuation_pages_) {
3619 10382 : if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3620 : // After clearing the evacuation candidate flag the page is again in a
3621 : // regular state.
3622 : p->ClearEvacuationCandidate();
3623 : aborted_pages_verified++;
3624 : } else {
3625 : DCHECK(p->IsEvacuationCandidate());
3626 : DCHECK(p->SweepingDone());
3627 10337 : p->owner()->memory_chunk_list().Remove(p);
3628 : }
3629 : }
3630 : DCHECK_EQ(aborted_pages_verified, aborted_pages);
3631 70969 : if (FLAG_trace_evacuation && (aborted_pages > 0)) {
3632 : PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
3633 0 : isolate()->time_millis_since_init(), aborted_pages);
3634 : }
3635 70969 : }
3636 :
3637 83492 : void MarkCompactCollector::ReleaseEvacuationCandidates() {
3638 177366 : for (Page* p : old_space_evacuation_pages_) {
3639 10382 : if (!p->IsEvacuationCandidate()) continue;
3640 : PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3641 : non_atomic_marking_state()->SetLiveBytes(p, 0);
3642 10337 : CHECK(p->SweepingDone());
3643 10337 : space->ReleasePage(p);
3644 : }
3645 : old_space_evacuation_pages_.clear();
3646 83492 : compacting_ = false;
3647 83492 : }
3648 :
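          : // Queues the pages of a paged space for sweeping. Evacuation candidates are
          : // skipped (they are processed during evacuation), NEVER_ALLOCATE_ON_PAGE
          : // pages are swept eagerly to stay iterable, and all but one completely
          : // empty page are released immediately.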
3649 774605 : void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3650 : space->ClearStats();
3651 :
3652 : int will_be_swept = 0;
3653 : bool unused_page_present = false;
3654 :
3655 : // Loop needs to support deletion if live bytes == 0 for a page.
3656 790802 : for (auto it = space->begin(); it != space->end();) {
3657 185 : Page* p = *(it++);
3658 : DCHECK(p->SweepingDone());
3659 :
3660 540326 : if (p->IsEvacuationCandidate()) {
3661 : // Will be processed in Evacuate.
3662 : DCHECK(!evacuation_candidates_.empty());
3663 : continue;
3664 : }
3665 :
3666 529944 : if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3667 : // We need to sweep the page to get it into an iterable state again. Note
3668 : // that this adds unusable memory into the free list that is later
3669 : // dropped from the free list again. Since we only use the flag for
3670 : // testing, this is fine.
3671 : p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
3672 : sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
3673 : Heap::ShouldZapGarbage()
3674 : ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
3675 185 : : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
3676 : space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
3677 : continue;
3678 : }
3679 :
3680 : // One unused page is kept, all further are released before sweeping them.
3681 529759 : if (non_atomic_marking_state()->live_bytes(p) == 0) {
3682 13537 : if (unused_page_present) {
3683 : if (FLAG_gc_verbose) {
3684 : PrintIsolate(isolate(), "sweeping: released page: %p",
3685 : static_cast<void*>(p));
3686 : }
3687 5815 : ArrayBufferTracker::FreeAll(p);
3688 529759 : space->memory_chunk_list().Remove(p);
3689 5815 : space->ReleasePage(p);
3690 5815 : continue;
3691 : }
3692 : unused_page_present = true;
3693 : }
3694 :
3695 523944 : sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
3696 : will_be_swept++;
3697 : }
3698 :
3699 : if (FLAG_gc_verbose) {
3700 : PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
3701 : space->name(), will_be_swept);
3702 : }
3703 250476 : }
3704 :
3705 166984 : void MarkCompactCollector::StartSweepSpaces() {
3706 834920 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
3707 : #ifdef DEBUG
3708 : state_ = SWEEP_SPACES;
3709 : #endif
3710 :
3711 : {
3712 : {
3713 : GCTracer::Scope sweep_scope(heap()->tracer(),
3714 83492 : GCTracer::Scope::MC_SWEEP_OLD);
3715 83492 : StartSweepSpace(heap()->old_space());
3716 : }
3717 : {
3718 : GCTracer::Scope sweep_scope(heap()->tracer(),
3719 83492 : GCTracer::Scope::MC_SWEEP_CODE);
3720 83492 : StartSweepSpace(heap()->code_space());
3721 : }
3722 : {
3723 : GCTracer::Scope sweep_scope(heap()->tracer(),
3724 83492 : GCTracer::Scope::MC_SWEEP_MAP);
3725 83492 : StartSweepSpace(heap()->map_space());
3726 : }
3727 83492 : sweeper()->StartSweeping();
3728 83492 : }
3729 83492 : }
3730 :
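          : // Debugging aid: counts the worklist entries per instance type and prints
          : // them sorted by frequency.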
3731 0 : void MarkCompactCollector::MarkingWorklist::PrintWorklist(
3732 : const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
3733 : std::map<InstanceType, int> count;
3734 0 : int total_count = 0;
3735 0 : worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
3736 0 : ++total_count;
3737 0 : count[obj->map()->instance_type()]++;
3738 0 : });
3739 : std::vector<std::pair<int, InstanceType>> rank;
3740 0 : rank.reserve(count.size());
3741 0 : for (const auto& i : count) {
3742 0 : rank.emplace_back(i.second, i.first);
3743 : }
3744 : std::map<InstanceType, std::string> instance_type_name;
3745 : #define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
3746 0 : INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
3747 : #undef INSTANCE_TYPE_NAME
3748 : std::sort(rank.begin(), rank.end(),
3749 0 : std::greater<std::pair<int, InstanceType>>());
3750 0 : PrintF("Worklist %s: %d\n", worklist_name, total_count);
3751 0 : for (auto i : rank) {
3752 0 : PrintF(" [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
3753 : }
3754 0 : }
3755 :
3756 : #ifdef ENABLE_MINOR_MC
3757 :
3758 : namespace {
3759 :
3760 : #ifdef VERIFY_HEAP
3761 :
3762 : class YoungGenerationMarkingVerifier : public MarkingVerifier {
3763 : public:
3764 : explicit YoungGenerationMarkingVerifier(Heap* heap)
3765 : : MarkingVerifier(heap),
3766 : marking_state_(
3767 : heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
3768 :
3769 : Bitmap* bitmap(const MemoryChunk* chunk) override {
3770 : return marking_state_->bitmap(chunk);
3771 : }
3772 :
3773 : bool IsMarked(HeapObject object) override {
3774 : return marking_state_->IsGrey(object);
3775 : }
3776 :
3777 : bool IsBlackOrGrey(HeapObject object) override {
3778 : return marking_state_->IsBlackOrGrey(object);
3779 : }
3780 :
3781 : void Run() override {
3782 : VerifyRoots(VISIT_ALL_IN_SCAVENGE);
3783 : VerifyMarking(heap_->new_space());
3784 : }
3785 :
3786 : protected:
3787 : void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
3788 : VerifyPointersImpl(start, end);
3789 : }
3790 :
3791 : void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
3792 : VerifyPointersImpl(start, end);
3793 : }
3794 :
3795 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
3796 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
3797 : VerifyHeapObjectImpl(target);
3798 : }
3799 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3800 : VerifyHeapObjectImpl(rinfo->target_object());
3801 : }
3802 : void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
3803 : VerifyPointersImpl(start, end);
3804 : }
3805 :
3806 : private:
3807 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
3808 : CHECK_IMPLIES(Heap::InNewSpace(heap_object), IsMarked(heap_object));
3809 : }
3810 :
3811 : template <typename TSlot>
3812 : V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
3813 : for (TSlot slot = start; slot < end; ++slot) {
3814 : typename TSlot::TObject object = *slot;
3815 : HeapObject heap_object;
3816 : // Minor MC treats weak references as strong.
3817 : if (object.GetHeapObject(&heap_object)) {
3818 : VerifyHeapObjectImpl(heap_object);
3819 : }
3820 : }
3821 : }
3822 :
3823 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
3824 : };
3825 :
3826 : class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
3827 : public:
3828 : explicit YoungGenerationEvacuationVerifier(Heap* heap)
3829 : : EvacuationVerifier(heap) {}
3830 :
3831 : void Run() override {
3832 : VerifyRoots(VISIT_ALL_IN_SCAVENGE);
3833 : VerifyEvacuation(heap_->new_space());
3834 : VerifyEvacuation(heap_->old_space());
3835 : VerifyEvacuation(heap_->code_space());
3836 : VerifyEvacuation(heap_->map_space());
3837 : }
3838 :
3839 : protected:
3840 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
3841 : CHECK_IMPLIES(Heap::InNewSpace(heap_object), Heap::InToSpace(heap_object));
3842 : }
3843 :
3844 : template <typename TSlot>
3845 : void VerifyPointersImpl(TSlot start, TSlot end) {
3846 : for (TSlot current = start; current < end; ++current) {
3847 : typename TSlot::TObject object = *current;
3848 : HeapObject heap_object;
3849 : if (object.GetHeapObject(&heap_object)) {
3850 : VerifyHeapObjectImpl(heap_object);
3851 : }
3852 : }
3853 : }
3854 :
3855 : void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
3856 : VerifyPointersImpl(start, end);
3857 : }
3858 : void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
3859 : VerifyPointersImpl(start, end);
3860 : }
3861 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
3862 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
3863 : VerifyHeapObjectImpl(target);
3864 : }
3865 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3866 : VerifyHeapObjectImpl(rinfo->target_object());
3867 : }
3868 : void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
3869 : VerifyPointersImpl(start, end);
3870 : }
3871 : };
3872 :
3873 : #endif // VERIFY_HEAP
3874 :
3875 : template <class ParallelItem>
3876 0 : void SeedGlobalHandles(GlobalHandles* global_handles, ItemParallelJob* job) {
3877 : // Create batches of global handles.
3878 : const size_t kGlobalHandlesBufferSize = 1000;
3879 : const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
3880 0 : for (size_t start = 0; start < new_space_nodes;
3881 : start += kGlobalHandlesBufferSize) {
3882 0 : size_t end = start + kGlobalHandlesBufferSize;
3883 0 : if (end > new_space_nodes) end = new_space_nodes;
3884 0 : job->AddItem(new ParallelItem(global_handles, start, end));
3885 : }
3886 0 : }
3887 :
3888 0 : bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
3889 : DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
3890 0 : return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
3891 : ->non_atomic_marking_state()
3892 0 : ->IsGrey(HeapObject::cast(*p));
3893 : }
3894 :
3895 : } // namespace
3896 :
3897 125736 : class YoungGenerationMarkingVisitor final
3898 : : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
3899 : public:
3900 : YoungGenerationMarkingVisitor(
3901 : MinorMarkCompactCollector::MarkingState* marking_state,
3902 : MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
3903 125766 : : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
3904 :
3905 0 : V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
3906 : ObjectSlot end) final {
3907 : VisitPointersImpl(host, start, end);
3908 0 : }
3909 :
3910 0 : V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
3911 : MaybeObjectSlot end) final {
3912 : VisitPointersImpl(host, start, end);
3913 0 : }
3914 :
3915 0 : V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
3916 : VisitPointerImpl(host, slot);
3917 0 : }
3918 :
3919 0 : V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot slot) final {
3920 : VisitPointerImpl(host, slot);
3921 0 : }
3922 :
3923 0 : V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
3924 : // Code objects are not expected in new space.
3925 0 : UNREACHABLE();
3926 : }
3927 :
3928 0 : V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
3929 : // Code objects are not expected in new space.
3930 0 : UNREACHABLE();
3931 : }
3932 :
3933 : private:
3934 : template <typename TSlot>
3935 : V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
3936 0 : for (TSlot slot = start; slot < end; ++slot) {
3937 : VisitPointer(host, slot);
3938 : }
3939 : }
3940 :
3941 : template <typename TSlot>
3942 : V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
3943 0 : typename TSlot::TObject target = *slot;
3944 0 : if (Heap::InNewSpace(target)) {
3945 : // Treat weak references as strong.
3946 : // TODO(marja): Proper weakness handling for minor-mcs.
3947 0 : HeapObject target_object = target.GetHeapObject();
3948 0 : MarkObjectViaMarkingWorklist(target_object);
3949 : }
3950 : }
3951 :
3952 0 : inline void MarkObjectViaMarkingWorklist(HeapObject object) {
3953 0 : if (marking_state_->WhiteToGrey(object)) {
3954 : // Marking deque overflow is unsupported for the young generation.
3955 0 : CHECK(worklist_.Push(object));
3956 : }
3957 0 : }
3958 :
3959 : MinorMarkCompactCollector::MarkingWorklist::View worklist_;
3960 : MinorMarkCompactCollector::MarkingState* marking_state_;
3961 : };
3962 :
3963 62883 : void MinorMarkCompactCollector::SetUp() {}
3964 :
3965 62868 : void MinorMarkCompactCollector::TearDown() {}
3966 :
3967 62883 : MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
3968 : : MarkCompactCollectorBase(heap),
3969 : worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
3970 : main_marking_visitor_(new YoungGenerationMarkingVisitor(
3971 62883 : marking_state(), worklist_, kMainMarker)),
3972 188649 : page_parallel_job_semaphore_(0) {
3973 : static_assert(
3974 : kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
3975 : "more marker tasks than marking deque can handle");
3976 62883 : }
3977 :
3978 188604 : MinorMarkCompactCollector::~MinorMarkCompactCollector() {
3979 62868 : delete worklist_;
3980 62868 : delete main_marking_visitor_;
3981 125736 : }
3982 :
3983 0 : int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
3984 : DCHECK_GT(pages, 0);
3985 0 : if (!FLAG_minor_mc_parallel_marking) return 1;
3986 : // Pages are not private to markers but we can still use them to estimate the
3987 : // amount of marking that is required.
3988 : const int kPagesPerTask = 2;
3989 0 : const int wanted_tasks = Max(1, pages / kPagesPerTask);
3990 : return Min(NumberOfAvailableCores(),
3991 : Min(wanted_tasks,
3992 0 : MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
3993 : }
3994 :
3995 83492 : void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
3996 166984 : for (Page* p : sweep_to_iterate_pages_) {
3997 0 : if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
3998 : p->ClearFlag(Page::SWEEP_TO_ITERATE);
3999 0 : non_atomic_marking_state()->ClearLiveness(p);
4000 : }
4001 : }
4002 : sweep_to_iterate_pages_.clear();
4003 83492 : }
4004 :
4005 0 : class YoungGenerationMigrationObserver final : public MigrationObserver {
4006 : public:
4007 : YoungGenerationMigrationObserver(Heap* heap,
4008 : MarkCompactCollector* mark_compact_collector)
4009 : : MigrationObserver(heap),
4010 0 : mark_compact_collector_(mark_compact_collector) {}
4011 :
4012 0 : inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
4013 : int size) final {
4014 : // Migrate color to old generation marking in case the object survived young
4015 : // generation garbage collection.
4016 0 : if (heap_->incremental_marking()->IsMarking()) {
4017 : DCHECK(
4018 : heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
4019 : heap_->incremental_marking()->TransferColor(src, dst);
4020 : }
4021 0 : }
4022 :
4023 : protected:
4024 : base::Mutex mutex_;
4025 : MarkCompactCollector* mark_compact_collector_;
4026 : };
4027 :
4028 0 : class YoungGenerationRecordMigratedSlotVisitor final
4029 : : public RecordMigratedSlotVisitor {
4030 : public:
4031 : explicit YoungGenerationRecordMigratedSlotVisitor(
4032 : MarkCompactCollector* collector)
4033 0 : : RecordMigratedSlotVisitor(collector) {}
4034 :
4035 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
4036 0 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
4037 0 : UNREACHABLE();
4038 : }
4039 :
4040 : private:
4041 : // Only record slots for host objects that are considered live by the full
4042 : // collector.
4043 0 : inline bool IsLive(HeapObject object) {
4044 0 : return collector_->non_atomic_marking_state()->IsBlack(object);
4045 : }
4046 :
4047 0 : inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
4048 : Address slot) final {
4049 0 : if (value->IsStrongOrWeak()) {
4050 : Page* p = Page::FromAddress(value.ptr());
4051 0 : if (p->InNewSpace()) {
4052 : DCHECK_IMPLIES(p->InToSpace(),
4053 : p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
4054 : RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
4055 0 : Page::FromAddress(slot), slot);
4056 0 : } else if (p->IsEvacuationCandidate() && IsLive(host)) {
4057 : RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
4058 0 : Page::FromAddress(slot), slot);
4059 : }
4060 : }
4061 0 : }
4062 : };
4063 :
4064 0 : void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
4065 0 : TRACE_GC(heap()->tracer(),
4066 : GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
4067 :
4068 : PointersUpdatingVisitor updating_visitor;
4069 : ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
4070 0 : &page_parallel_job_semaphore_);
4071 :
4072 0 : CollectNewSpaceArrayBufferTrackerItems(&updating_job);
4073 : // Create batches of global handles.
4074 : SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
4075 0 : &updating_job);
4076 0 : const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
4077 : int remembered_set_pages = 0;
4078 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4079 : &updating_job, heap()->old_space(),
4080 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4081 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4082 : &updating_job, heap()->code_space(),
4083 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4084 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4085 : &updating_job, heap()->map_space(),
4086 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4087 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4088 : &updating_job, heap()->lo_space(),
4089 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4090 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4091 : &updating_job, heap()->code_lo_space(),
4092 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4093 : const int remembered_set_tasks =
4094 : remembered_set_pages == 0 ? 0
4095 : : NumberOfParallelPointerUpdateTasks(
4096 0 : remembered_set_pages, old_to_new_slots_);
4097 : const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
4098 0 : for (int i = 0; i < num_tasks; i++) {
4099 : updating_job.AddTask(new PointersUpdatingTask(
4100 : isolate(), GCTracer::BackgroundScope::
4101 0 : MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
4102 : }
4103 :
4104 : {
4105 0 : TRACE_GC(heap()->tracer(),
4106 : GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
4107 0 : heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
4108 : }
4109 : {
4110 0 : TRACE_GC(heap()->tracer(),
4111 : GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
4112 0 : updating_job.Run(isolate()->async_counters());
4113 0 : heap()->array_buffer_collector()->FreeAllocations();
4114 : }
4115 :
4116 : {
4117 0 : TRACE_GC(heap()->tracer(),
4118 : GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
4119 :
4120 0 : EvacuationWeakObjectRetainer evacuation_object_retainer;
4121 0 : heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4122 :
4123 : // Update pointers from external string table.
4124 : heap()->UpdateNewSpaceReferencesInExternalStringTable(
4125 0 : &UpdateReferenceInExternalStringTableEntry);
4126 0 : }
4127 0 : }
4128 :
4129 0 : class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
4130 : public:
4131 : explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
4132 0 : : collector_(collector) {}
4133 :
4134 0 : void VisitRootPointer(Root root, const char* description,
4135 : FullObjectSlot p) final {
4136 : MarkObjectByPointer(p);
4137 0 : }
4138 :
4139 0 : void VisitRootPointers(Root root, const char* description,
4140 : FullObjectSlot start, FullObjectSlot end) final {
4141 0 : for (FullObjectSlot p = start; p < end; ++p) {
4142 : MarkObjectByPointer(p);
4143 : }
4144 0 : }
4145 :
4146 : private:
4147 : V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
4148 0 : if (!(*p)->IsHeapObject()) return;
4149 0 : collector_->MarkRootObject(HeapObject::cast(*p));
4150 : }
4151 : MinorMarkCompactCollector* const collector_;
4152 : };
4153 :
4154 0 : void MinorMarkCompactCollector::CollectGarbage() {
4155 : {
4156 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
4157 0 : heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
4158 0 : CleanupSweepToIteratePages();
4159 : }
4160 :
4161 0 : MarkLiveObjects();
4162 0 : ClearNonLiveReferences();
4163 : #ifdef VERIFY_HEAP
4164 : if (FLAG_verify_heap) {
4165 : YoungGenerationMarkingVerifier verifier(heap());
4166 : verifier.Run();
4167 : }
4168 : #endif // VERIFY_HEAP
4169 :
4170 0 : Evacuate();
4171 : #ifdef VERIFY_HEAP
4172 : if (FLAG_verify_heap) {
4173 : YoungGenerationEvacuationVerifier verifier(heap());
4174 : verifier.Run();
4175 : }
4176 : #endif // VERIFY_HEAP
4177 :
4178 : {
4179 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
4180 0 : heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
4181 : }
4182 :
4183 : {
4184 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
4185 0 : for (Page* p :
4186 0 : PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
4187 : DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
4188 0 : non_atomic_marking_state()->ClearLiveness(p);
4189 0 : if (FLAG_concurrent_marking) {
4190 : // Ensure that concurrent marker does not track pages that are
4191 : // going to be unmapped.
4192 0 : heap()->concurrent_marking()->ClearMemoryChunkData(p);
4193 : }
4194 0 : }
4195 : }
4196 :
4197 : RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
4198 0 : heap(), [](MemoryChunk* chunk) {
4199 0 : if (chunk->SweepingDone()) {
4200 0 : RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
4201 : } else {
4202 0 : RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
4203 : }
4204 0 : });
4205 :
4206 : heap()->account_external_memory_concurrently_freed();
4207 0 : }
4208 :
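          : // Makes the page linearly iterable by installing filler objects (optionally
          : // zapping the memory) in the gaps between live (grey) objects, clearing the
          : // full collector's markbits over those gaps, and, depending on the marking
          : // treatment mode, resetting the page's liveness data.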
4209 0 : void MinorMarkCompactCollector::MakeIterable(
4210 0 : Page* p, MarkingTreatmentMode marking_mode,
4211 : FreeSpaceTreatmentMode free_space_mode) {
4212 : // We have to clear the full collectors markbits for the areas that we
4213 : // remove here.
4214 : MarkCompactCollector* full_collector = heap()->mark_compact_collector();
4215 0 : Address free_start = p->area_start();
4216 :
4217 0 : for (auto object_and_size :
4218 0 : LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
4219 0 : HeapObject const object = object_and_size.first;
4220 : DCHECK(non_atomic_marking_state()->IsGrey(object));
4221 : Address free_end = object->address();
4222 0 : if (free_end != free_start) {
4223 0 : CHECK_GT(free_end, free_start);
4224 0 : size_t size = static_cast<size_t>(free_end - free_start);
4225 : full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
4226 : p->AddressToMarkbitIndex(free_start),
4227 0 : p->AddressToMarkbitIndex(free_end));
4228 0 : if (free_space_mode == ZAP_FREE_SPACE) {
4229 : ZapCode(free_start, size);
4230 : }
4231 : p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
4232 0 : ClearRecordedSlots::kNo);
4233 : }
4234 0 : Map map = object->synchronized_map();
4235 0 : int size = object->SizeFromMap(map);
4236 0 : free_start = free_end + size;
4237 : }
4238 :
4239 0 : if (free_start != p->area_end()) {
4240 0 : CHECK_GT(p->area_end(), free_start);
4241 0 : size_t size = static_cast<size_t>(p->area_end() - free_start);
4242 : full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
4243 : p->AddressToMarkbitIndex(free_start),
4244 0 : p->AddressToMarkbitIndex(p->area_end()));
4245 0 : if (free_space_mode == ZAP_FREE_SPACE) {
4246 : ZapCode(free_start, size);
4247 : }
4248 : p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
4249 0 : ClearRecordedSlots::kNo);
4250 : }
4251 :
4252 0 : if (marking_mode == MarkingTreatmentMode::CLEAR) {
4253 0 : non_atomic_marking_state()->ClearLiveness(p);
4254 : p->ClearFlag(Page::SWEEP_TO_ITERATE);
4255 : }
4256 0 : }
4257 :
4258 : namespace {
4259 :
4260 : // Helper class for pruning the string table.
4261 0 : class YoungGenerationExternalStringTableCleaner : public RootVisitor {
4262 : public:
4263 : YoungGenerationExternalStringTableCleaner(
4264 : MinorMarkCompactCollector* collector)
4265 0 : : heap_(collector->heap()),
4266 0 : marking_state_(collector->non_atomic_marking_state()) {}
4267 :
4268 0 : void VisitRootPointers(Root root, const char* description,
4269 : FullObjectSlot start, FullObjectSlot end) override {
4270 : DCHECK_EQ(static_cast<int>(root),
4271 : static_cast<int>(Root::kExternalStringsTable));
4272 : // Visit all HeapObject pointers in [start, end).
4273 0 : for (FullObjectSlot p = start; p < end; ++p) {
4274 0 : Object o = *p;
4275 0 : if (o->IsHeapObject()) {
4276 : HeapObject heap_object = HeapObject::cast(o);
4277 0 : if (marking_state_->IsWhite(heap_object)) {
4278 0 : if (o->IsExternalString()) {
4279 0 : heap_->FinalizeExternalString(String::cast(*p));
4280 : } else {
4281 : // The original external string may have been internalized.
4282 : DCHECK(o->IsThinString());
4283 : }
4284 : // Set the entry to the_hole_value (as deleted).
4285 0 : p.store(ReadOnlyRoots(heap_).the_hole_value());
4286 : }
4287 : }
4288 : }
4289 0 : }
4290 :
4291 : private:
4292 : Heap* heap_;
4293 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4294 : };
4295 :
4296 : // Marked young generation objects and all old generation objects will be
4297 : // retained.
4298 0 : class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
4299 : public:
4300 : explicit MinorMarkCompactWeakObjectRetainer(
4301 : MinorMarkCompactCollector* collector)
4302 0 : : marking_state_(collector->non_atomic_marking_state()) {}
4303 :
4304 0 : Object RetainAs(Object object) override {
4305 : HeapObject heap_object = HeapObject::cast(object);
4306 0 : if (!Heap::InNewSpace(heap_object)) return object;
4307 :
4308 : // Young generation marking only marks to grey instead of black.
4309 : DCHECK(!marking_state_->IsBlack(heap_object));
4310 0 : if (marking_state_->IsGrey(heap_object)) {
4311 0 : return object;
4312 : }
4313 0 : return Object();
4314 : }
4315 :
4316 : private:
4317 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4318 : };
4319 :
4320 : } // namespace
4321 :
4322 0 : void MinorMarkCompactCollector::ClearNonLiveReferences() {
4323 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
4324 :
4325 : {
4326 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
4327 : // Internalized strings are always stored in old space, so there is no need
4328 : // to clean them here.
4329 : YoungGenerationExternalStringTableCleaner external_visitor(this);
4330 0 : heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
4331 0 : heap()->external_string_table_.CleanUpNewSpaceStrings();
4332 : }
4333 :
4334 : {
4335 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
4336 : // Process the weak references.
4337 : MinorMarkCompactWeakObjectRetainer retainer(this);
4338 0 : heap()->ProcessYoungWeakReferences(&retainer);
4339 0 : }
4340 0 : }
4341 :
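       : // Evacuation prologue: remember all new-space pages up to the current
       : // allocation top, then flip the semispaces and reset the linear allocation
       : // area so that evacuation allocates into the fresh to-space.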
4342 0 : void MinorMarkCompactCollector::EvacuatePrologue() {
4343 0 : NewSpace* new_space = heap()->new_space();
4344 : // Append the list of new space pages to be processed.
4345 0 : for (Page* p :
4346 0 : PageRange(new_space->first_allocatable_address(), new_space->top())) {
4347 0 : new_space_evacuation_pages_.push_back(p);
4348 : }
4349 0 : new_space->Flip();
4350 0 : new_space->ResetLinearAllocationArea();
4351 0 : }
4352 :
4353 0 : void MinorMarkCompactCollector::EvacuateEpilogue() {
4354 0 : heap()->new_space()->set_age_mark(heap()->new_space()->top());
4355 : // Give pages that are queued to be freed back to the OS.
4356 0 : heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
4357 0 : }
4358 :
4359 0 : UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
4360 : MemoryChunk* chunk, Address start, Address end) {
4361 : return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
4362 0 : chunk, start, end, non_atomic_marking_state());
4363 : }
4364 :
4365 0 : UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
4366 : MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
4367 : return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
4368 0 : heap(), non_atomic_marking_state(), chunk, updating_mode);
4369 : }
4370 :
4371 : class MarkingItem;
4372 : class GlobalHandlesMarkingItem;
4373 : class PageMarkingItem;
4374 : class RootMarkingItem;
4375 : class YoungGenerationMarkingTask;
4376 :
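       : // Parallel young-generation marking is organized as an ItemParallelJob:
       : // MarkingItems (pages, global-handle ranges) are distributed among
       : // YoungGenerationMarkingTasks, each of which works on its own view of the
       : // shared marking worklist and buffers live-byte counts per page until
       : // FlushLiveBytes() publishes them to the marking state.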
4377 0 : class MarkingItem : public ItemParallelJob::Item {
4378 : public:
4379 0 : ~MarkingItem() override = default;
4380 : virtual void Process(YoungGenerationMarkingTask* task) = 0;
4381 : };
4382 :
4383 0 : class YoungGenerationMarkingTask : public ItemParallelJob::Task {
4384 : public:
4385 0 : YoungGenerationMarkingTask(
4386 : Isolate* isolate, MinorMarkCompactCollector* collector,
4387 : MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
4388 : : ItemParallelJob::Task(isolate),
4389 : collector_(collector),
4390 : marking_worklist_(global_worklist, task_id),
4391 0 : marking_state_(collector->marking_state()),
4392 0 : visitor_(marking_state_, global_worklist, task_id) {
4393 0 : local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
4394 0 : Page::kPageSize);
4395 0 : }
4396 :
4397 0 : void RunInParallel() override {
4398 0 : TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
4399 : GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
4400 0 : double marking_time = 0.0;
4401 : {
4402 : TimedScope scope(&marking_time);
4403 : MarkingItem* item = nullptr;
4404 0 : while ((item = GetItem<MarkingItem>()) != nullptr) {
4405 0 : item->Process(this);
4406 0 : item->MarkFinished();
4407 0 : EmptyLocalMarkingWorklist();
4408 : }
4409 0 : EmptyMarkingWorklist();
4410 : DCHECK(marking_worklist_.IsLocalEmpty());
4411 0 : FlushLiveBytes();
4412 : }
4413 0 : if (FLAG_trace_minor_mc_parallel_marking) {
4414 0 : PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
4415 0 : static_cast<void*>(this), marking_time);
4416 0 : }
4417 0 :   }
4418 :
4419 0 : void MarkObject(Object object) {
4420 0 : if (!Heap::InNewSpace(object)) return;
4421 : HeapObject heap_object = HeapObject::cast(object);
4422 0 : if (marking_state_->WhiteToGrey(heap_object)) {
4423 : const int size = visitor_.Visit(heap_object);
4424 0 : IncrementLiveBytes(heap_object, size);
4425 : }
4426 : }
4427 :
4428 : private:
4429 0 : void EmptyLocalMarkingWorklist() {
4430 0 : HeapObject object;
4431 0 : while (marking_worklist_.Pop(&object)) {
4432 : const int size = visitor_.Visit(object);
4433 0 : IncrementLiveBytes(object, size);
4434 : }
4435 0 : }
4436 :
4437 0 : void EmptyMarkingWorklist() {
4438 0 : HeapObject object;
4439 0 : while (marking_worklist_.Pop(&object)) {
4440 : const int size = visitor_.Visit(object);
4441 0 : IncrementLiveBytes(object, size);
4442 : }
4443 0 : }
4444 :
4445 : void IncrementLiveBytes(HeapObject object, intptr_t bytes) {
4446 0 : local_live_bytes_[Page::FromHeapObject(object)] += bytes;
4447 : }
4448 :
4449 0 : void FlushLiveBytes() {
4450 0 : for (auto pair : local_live_bytes_) {
4451 : marking_state_->IncrementLiveBytes(pair.first, pair.second);
4452 : }
4453 0 : }
4454 :
4455 : MinorMarkCompactCollector* collector_;
4456 : MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
4457 : MinorMarkCompactCollector::MarkingState* marking_state_;
4458 : YoungGenerationMarkingVisitor visitor_;
4459 : std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
4460 : };
4461 :
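       : // A PageMarkingItem scans the OLD_TO_NEW remembered set of a single chunk,
       : // marks the young-generation objects those slots point to, and counts the
       : // slots that must be kept; the count is folded into the shared
       : // |global_slots| counter when the item is destroyed.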
4462 : class PageMarkingItem : public MarkingItem {
4463 : public:
4464 : explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
4465 0 : : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
4466 0 :   ~PageMarkingItem() override { global_slots_->fetch_add(slots_); }
4467 :
4468 0 : void Process(YoungGenerationMarkingTask* task) override {
4469 0 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4470 : "PageMarkingItem::Process");
4471 0 : base::MutexGuard guard(chunk_->mutex());
4472 : MarkUntypedPointers(task);
4473 0 : MarkTypedPointers(task);
4474 0 : }
4475 :
4476 : private:
4477 0 : inline Heap* heap() { return chunk_->heap(); }
4478 :
4479 : void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
4480 : RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
4481 0 : [this, task](MaybeObjectSlot slot) {
4482 0 : return CheckAndMarkObject(task, slot);
4483 0 : },
4484 0 : SlotSet::PREFREE_EMPTY_BUCKETS);
4485 : }
4486 :
4487 : void MarkTypedPointers(YoungGenerationMarkingTask* task) {
4488 : RememberedSet<OLD_TO_NEW>::IterateTyped(
4489 0 : chunk_, [=](SlotType slot_type, Address slot) {
4490 : return UpdateTypedSlotHelper::UpdateTypedSlot(
4491 0 : heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
4492 0 : return CheckAndMarkObject(task, slot);
4493 0 : });
4494 0 : });
4495 : }
4496 :
4497 : template <typename TSlot>
4498 : V8_INLINE SlotCallbackResult
4499 : CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
4500 : static_assert(
4501 : std::is_same<TSlot, FullMaybeObjectSlot>::value ||
4502 : std::is_same<TSlot, MaybeObjectSlot>::value,
4503 : "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
4504 0 : MaybeObject object = *slot;
4505 0 : if (Heap::InNewSpace(object)) {
4506 : // Marking happens before flipping the young generation, so the object
4507 : // has to be in ToSpace.
4508 : DCHECK(Heap::InToSpace(object));
4509 0 : HeapObject heap_object;
4510 0 : bool success = object.GetHeapObject(&heap_object);
4511 : USE(success);
4512 : DCHECK(success);
4513 0 : task->MarkObject(heap_object);
4514 0 : slots_++;
4515 : return KEEP_SLOT;
4516 : }
4517 : return REMOVE_SLOT;
4518 : }
4519 :
4520 : MemoryChunk* chunk_;
4521 : std::atomic<int>* global_slots_;
4522 : int slots_;
4523 : };
4524 :
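       : // A GlobalHandlesMarkingItem covers the global-handle nodes in
       : // [start_, end_) and marks the strong and dependent new-space roots found
       : // there via a RootVisitor that forwards to the task's MarkObject().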
4525 : class GlobalHandlesMarkingItem : public MarkingItem {
4526 : public:
4527 : GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
4528 : size_t end)
4529 0 : : global_handles_(global_handles), start_(start), end_(end) {}
4530 0 : ~GlobalHandlesMarkingItem() override = default;
4531 :
4532 0 : void Process(YoungGenerationMarkingTask* task) override {
4533 0 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4534 : "GlobalHandlesMarkingItem::Process");
4535 : GlobalHandlesRootMarkingVisitor visitor(task);
4536 : global_handles_
4537 : ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
4538 0 : &visitor, start_, end_);
4539 0 : }
4540 :
4541 : private:
4542 0 : class GlobalHandlesRootMarkingVisitor : public RootVisitor {
4543 : public:
4544 : explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
4545 0 : : task_(task) {}
4546 :
4547 0 : void VisitRootPointer(Root root, const char* description,
4548 : FullObjectSlot p) override {
4549 : DCHECK_EQ(Root::kGlobalHandles, root);
4550 0 : task_->MarkObject(*p);
4551 0 : }
4552 :
4553 0 : void VisitRootPointers(Root root, const char* description,
4554 : FullObjectSlot start, FullObjectSlot end) override {
4555 : DCHECK_EQ(Root::kGlobalHandles, root);
4556 0 : for (FullObjectSlot p = start; p < end; ++p) {
4557 0 : task_->MarkObject(*p);
4558 : }
4559 0 : }
4560 :
4561 : private:
4562 : YoungGenerationMarkingTask* task_;
4563 : };
4564 :
4565 : GlobalHandles* global_handles_;
4566 : size_t start_;
4567 : size_t end_;
4568 : };
4569 :
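       : // Seeds the parallel marking job with the heap roots, batched global
       : // handles and one PageMarkingItem per chunk that has old->new slots, then
       : // runs the YoungGenerationMarkingTasks. The number of surviving old->new
       : // slots observed by the items is recorded in old_to_new_slots_.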
4570 0 : void MinorMarkCompactCollector::MarkRootSetInParallel(
4571 0 : RootMarkingVisitor* root_visitor) {
4572 : std::atomic<int> slots;
4573 : {
4574 : ItemParallelJob job(isolate()->cancelable_task_manager(),
4575 0 : &page_parallel_job_semaphore_);
4576 :
4577 : // Seed the root set (roots + old->new set).
4578 : {
4579 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
4580 0 : heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
4581 : // Create batches of global handles.
4582 : SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
4583 0 : &job);
4584 : // Create items for each page.
4585 : RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
4586 0 : heap(), [&job, &slots](MemoryChunk* chunk) {
4587 0 : job.AddItem(new PageMarkingItem(chunk, &slots));
4588 0 : });
4589 : }
4590 :
4591 : // Add tasks and run in parallel.
4592 : {
4593 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
4594 : const int new_space_pages =
4595 0 : static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
4596 0 : const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
4597 0 : for (int i = 0; i < num_tasks; i++) {
4598 : job.AddTask(
4599 0 : new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
4600 : }
4601 0 : job.Run(isolate()->async_counters());
4602 0 : DCHECK(worklist()->IsEmpty());
4603 0 : }
4604 : }
4605 0 : old_to_new_slots_ = slots;
4606 0 : }
4607 :
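       : // Entry point for young-generation marking: roots are marked in parallel,
       : // the remaining worklist is drained on the main thread, and finally weak
       : // global handles are processed (finalizers and phantom handles).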
4608 0 : void MinorMarkCompactCollector::MarkLiveObjects() {
4609 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
4610 :
4611 : PostponeInterruptsScope postpone(isolate());
4612 :
4613 : RootMarkingVisitor root_visitor(this);
4614 :
4615 0 : MarkRootSetInParallel(&root_visitor);
4616 :
4617 :   // Mark the rest on the main thread.
4618 : {
4619 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
4620 0 : ProcessMarkingWorklist();
4621 : }
4622 :
4623 : {
4624 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
4625 : isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
4626 0 : &IsUnmarkedObjectForYoungGeneration);
4627 : isolate()
4628 : ->global_handles()
4629 0 : ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
4630 : isolate()
4631 : ->global_handles()
4632 : ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
4633 0 : &root_visitor, &IsUnmarkedObjectForYoungGeneration);
4634 0 : ProcessMarkingWorklist();
4635 0 : }
4636 0 : }
4637 :
4638 0 : void MinorMarkCompactCollector::ProcessMarkingWorklist() {
4639 : MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
4640 0 : HeapObject object;
4641 0 : while (marking_worklist.Pop(&object)) {
4642 : DCHECK(!object->IsFiller());
4643 : DCHECK(object->IsHeapObject());
4644 : DCHECK(heap()->Contains(object));
4645 : DCHECK(non_atomic_marking_state()->IsGrey(object));
4646 : main_marking_visitor()->Visit(object);
4647 : }
4648 : DCHECK(marking_worklist.IsLocalEmpty());
4649 0 : }
4650 :
4651 0 : void MinorMarkCompactCollector::Evacuate() {
4652 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
4653 0 : base::MutexGuard guard(heap()->relocation_mutex());
4654 :
4655 : {
4656 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
4657 0 : EvacuatePrologue();
4658 : }
4659 :
4660 : {
4661 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
4662 0 : EvacuatePagesInParallel();
4663 : }
4664 :
4665 0 : UpdatePointersAfterEvacuation();
4666 :
4667 : {
4668 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
4669 0 : if (!heap()->new_space()->Rebalance()) {
4670 0 : heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
4671 0 : }
4672 : }
4673 :
4674 : {
4675 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
4676 0 : for (Page* p : new_space_evacuation_pages_) {
4677 0 : if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
4678 : p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
4679 : p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
4680 : p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
4681 : p->SetFlag(Page::SWEEP_TO_ITERATE);
4682 0 : sweep_to_iterate_pages_.push_back(p);
4683 : }
4684 : }
4685 0 : new_space_evacuation_pages_.clear();
4686 : }
4687 :
4688 : {
4689 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
4690 0 : EvacuateEpilogue();
4691 0 : }
4692 0 : }
4693 :
4694 : namespace {
4695 :
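       : // Evacuator specialization for the young generation. RawEvacuatePage()
       : // either copies the grey objects off a page (kObjectsNewToOld) or, for
       : // whole-page promotion (kPageNewToOld / kPageNewToNew), keeps the objects
       : // in place, frees dead array buffers and makes the page iterable again
       : // when garbage zapping or incremental marking requires it.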
4696 0 : class YoungGenerationEvacuator : public Evacuator {
4697 : public:
4698 : YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
4699 : RecordMigratedSlotVisitor* record_visitor)
4700 0 : : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
4701 :
4702 0 : GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
4703 0 : return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
4704 : }
4705 :
4706 : protected:
4707 : void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
4708 :
4709 : MinorMarkCompactCollector* collector_;
4710 : };
4711 :
4712 0 : void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
4713 : intptr_t* live_bytes) {
4714 0 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4715 : "YoungGenerationEvacuator::RawEvacuatePage");
4716 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
4717 0 : collector_->non_atomic_marking_state();
4718 0 : *live_bytes = marking_state->live_bytes(chunk);
4719 0 : switch (ComputeEvacuationMode(chunk)) {
4720 : case kObjectsNewToOld:
4721 : LiveObjectVisitor::VisitGreyObjectsNoFail(
4722 : chunk, marking_state, &new_space_visitor_,
4723 0 : LiveObjectVisitor::kClearMarkbits);
4724 : // ArrayBufferTracker will be updated during pointers updating.
4725 0 : break;
4726 : case kPageNewToOld:
4727 : LiveObjectVisitor::VisitGreyObjectsNoFail(
4728 : chunk, marking_state, &new_to_old_page_visitor_,
4729 0 : LiveObjectVisitor::kKeepMarking);
4730 : new_to_old_page_visitor_.account_moved_bytes(
4731 : marking_state->live_bytes(chunk));
4732 0 : if (chunk->owner()->identity() != NEW_LO_SPACE) {
4733 : // TODO(mlippautz): If cleaning array buffers is too slow here we can
4734 : // delay it until the next GC.
4735 0 : ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
4736 0 : if (heap()->ShouldZapGarbage()) {
4737 : collector_->MakeIterable(static_cast<Page*>(chunk),
4738 : MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
4739 0 : } else if (heap()->incremental_marking()->IsMarking()) {
4740 : // When incremental marking is on, we need to clear the mark bits of
4741 : // the full collector. We cannot yet discard the young generation mark
4742 : // bits as they are still relevant for pointers updating.
4743 : collector_->MakeIterable(static_cast<Page*>(chunk),
4744 : MarkingTreatmentMode::KEEP,
4745 0 : IGNORE_FREE_SPACE);
4746 : }
4747 : }
4748 : break;
4749 : case kPageNewToNew:
4750 : LiveObjectVisitor::VisitGreyObjectsNoFail(
4751 : chunk, marking_state, &new_to_new_page_visitor_,
4752 0 : LiveObjectVisitor::kKeepMarking);
4753 : new_to_new_page_visitor_.account_moved_bytes(
4754 : marking_state->live_bytes(chunk));
4755 : DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
4756 : // TODO(mlippautz): If cleaning array buffers is too slow here we can
4757 : // delay it until the next GC.
4758 0 : ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
4759 : if (heap()->ShouldZapGarbage()) {
4760 : collector_->MakeIterable(static_cast<Page*>(chunk),
4761 : MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
4762 0 : } else if (heap()->incremental_marking()->IsMarking()) {
4763 : // When incremental marking is on, we need to clear the mark bits of
4764 : // the full collector. We cannot yet discard the young generation mark
4765 : // bits as they are still relevant for pointers updating.
4766 : collector_->MakeIterable(static_cast<Page*>(chunk),
4767 0 : MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
4768 : }
4769 : break;
4770 : case kObjectsOldToOld:
4771 0 : UNREACHABLE();
4772 : break;
4773 0 : }
4774 0 : }
4775 :
4776 : } // namespace
4777 :
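       : // Decides per page whether to move the whole page (pages below the age
       : // mark are promoted to old space, others stay in new space) or to copy
       : // its live objects individually, and then runs the evacuation items in
       : // parallel using YoungGenerationEvacuators.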
4778 0 : void MinorMarkCompactCollector::EvacuatePagesInParallel() {
4779 : ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
4780 0 : &page_parallel_job_semaphore_);
4781 : intptr_t live_bytes = 0;
4782 :
4783 0 : for (Page* page : new_space_evacuation_pages_) {
4784 : intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
4785 0 : if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
4786 0 : live_bytes += live_bytes_on_page;
4787 0 : if (ShouldMovePage(page, live_bytes_on_page)) {
4788 0 : if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
4789 0 : EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
4790 : } else {
4791 0 : EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
4792 : }
4793 : }
4794 0 : evacuation_job.AddItem(new EvacuationItem(page));
4795 : }
4796 0 : if (evacuation_job.NumberOfItems() == 0) return;
4797 :
4798 : YoungGenerationMigrationObserver observer(heap(),
4799 0 : heap()->mark_compact_collector());
4800 : YoungGenerationRecordMigratedSlotVisitor record_visitor(
4801 0 : heap()->mark_compact_collector());
4802 : CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
4803 0 : this, &evacuation_job, &record_visitor, &observer, live_bytes);
4804 : }
4805 :
4806 0 : int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
4807 : ItemParallelJob* job) {
4808 : int pages = 0;
4809 0 : for (Page* p : new_space_evacuation_pages_) {
4810 0 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
4811 0 : if (p->local_tracker() == nullptr) continue;
4812 :
4813 0 : pages++;
4814 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
4815 0 : p, ArrayBufferTrackerUpdatingItem::kRegular));
4816 : }
4817 : }
4818 0 : return pages;
4819 : }
4820 :
4821 : #endif // ENABLE_MINOR_MC
4822 :
4823 : } // namespace internal
4824 183867 : } // namespace v8