Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/mark-compact.h"
6 :
7 : #include <unordered_map>
8 :
9 : #include "src/base/utils/random-number-generator.h"
10 : #include "src/cancelable-task.h"
11 : #include "src/compilation-cache.h"
12 : #include "src/deoptimizer.h"
13 : #include "src/execution.h"
14 : #include "src/frames-inl.h"
15 : #include "src/global-handles.h"
16 : #include "src/heap/array-buffer-collector.h"
17 : #include "src/heap/array-buffer-tracker-inl.h"
18 : #include "src/heap/gc-tracer.h"
19 : #include "src/heap/incremental-marking-inl.h"
20 : #include "src/heap/invalidated-slots-inl.h"
21 : #include "src/heap/item-parallel-job.h"
22 : #include "src/heap/local-allocator-inl.h"
23 : #include "src/heap/mark-compact-inl.h"
24 : #include "src/heap/object-stats.h"
25 : #include "src/heap/objects-visiting-inl.h"
26 : #include "src/heap/spaces-inl.h"
27 : #include "src/heap/sweeper.h"
28 : #include "src/heap/worklist.h"
29 : #include "src/ic/stub-cache.h"
30 : #include "src/objects/embedder-data-array-inl.h"
31 : #include "src/objects/foreign.h"
32 : #include "src/objects/hash-table-inl.h"
33 : #include "src/objects/js-objects-inl.h"
34 : #include "src/objects/maybe-object.h"
35 : #include "src/objects/slots-inl.h"
36 : #include "src/transitions-inl.h"
37 : #include "src/utils-inl.h"
38 : #include "src/v8.h"
39 : #include "src/vm-state-inl.h"
40 :
41 : namespace v8 {
42 : namespace internal {
43 :
44 : const char* Marking::kWhiteBitPattern = "00";
45 : const char* Marking::kBlackBitPattern = "11";
46 : const char* Marking::kGreyBitPattern = "10";
47 : const char* Marking::kImpossibleBitPattern = "01";
48 :
49 : // The following has to hold in order for {MarkingState::MarkBitFrom} to not
50 : // produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
51 : STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
52 :
53 : // =============================================================================
54 : // Verifiers
55 : // =============================================================================
56 :
57 : #ifdef VERIFY_HEAP
58 : namespace {
59 :
60 : class MarkingVerifier : public ObjectVisitor, public RootVisitor {
61 : public:
62 : virtual void Run() = 0;
63 :
64 : protected:
65 : explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
66 :
67 : virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
68 :
69 : virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
70 : virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
71 : virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
72 :
73 : virtual bool IsMarked(HeapObject object) = 0;
74 :
75 : virtual bool IsBlackOrGrey(HeapObject object) = 0;
76 :
77 : void VisitPointers(HeapObject host, ObjectSlot start,
78 : ObjectSlot end) override {
79 : VerifyPointers(start, end);
80 : }
81 :
82 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
83 : MaybeObjectSlot end) override {
84 : VerifyPointers(start, end);
85 : }
86 :
87 : void VisitRootPointers(Root root, const char* description,
88 : FullObjectSlot start, FullObjectSlot end) override {
89 : VerifyRootPointers(start, end);
90 : }
91 :
92 : void VerifyRoots(VisitMode mode);
93 : void VerifyMarkingOnPage(const Page* page, Address start, Address end);
94 : void VerifyMarking(NewSpace* new_space);
95 : void VerifyMarking(PagedSpace* paged_space);
96 : void VerifyMarking(LargeObjectSpace* lo_space);
97 :
98 : Heap* heap_;
99 : };
100 :
101 : void MarkingVerifier::VerifyRoots(VisitMode mode) {
102 : heap_->IterateStrongRoots(this, mode);
103 : }
104 :
105 : void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
106 : Address end) {
107 : HeapObject object;
108 : Address next_object_must_be_here_or_later = start;
109 : for (Address current = start; current < end;) {
110 : object = HeapObject::FromAddress(current);
111 : // One word fillers at the end of a black area can be grey.
112 : if (IsBlackOrGrey(object) &&
113 : object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
114 : CHECK(IsMarked(object));
115 : CHECK(current >= next_object_must_be_here_or_later);
116 : object->Iterate(this);
117 : next_object_must_be_here_or_later = current + object->Size();
118 : // The object is either part of a black area of black allocation or a
119 : // regular black object
120 : CHECK(
121 : bitmap(page)->AllBitsSetInRange(
122 : page->AddressToMarkbitIndex(current),
123 : page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
124 : bitmap(page)->AllBitsClearInRange(
125 : page->AddressToMarkbitIndex(current + kTaggedSize * 2),
126 : page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
127 : current = next_object_must_be_here_or_later;
128 : } else {
129 : current += kTaggedSize;
130 : }
131 : }
132 : }
133 :
134 : void MarkingVerifier::VerifyMarking(NewSpace* space) {
135 : Address end = space->top();
136 : // The bottom position is at the start of its page. Allows us to use
137 : // page->area_start() as start of range on all pages.
138 : CHECK_EQ(space->first_allocatable_address(),
139 : space->first_page()->area_start());
140 :
141 : PageRange range(space->first_allocatable_address(), end);
142 : for (auto it = range.begin(); it != range.end();) {
143 : Page* page = *(it++);
144 : Address limit = it != range.end() ? page->area_end() : end;
145 : CHECK(limit == end || !page->Contains(end));
146 : VerifyMarkingOnPage(page, page->area_start(), limit);
147 : }
148 : }
149 :
150 : void MarkingVerifier::VerifyMarking(PagedSpace* space) {
151 : for (Page* p : *space) {
152 : VerifyMarkingOnPage(p, p->area_start(), p->area_end());
153 : }
154 : }
155 :
156 : void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
157 : LargeObjectIterator it(lo_space);
158 : for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
159 : if (IsBlackOrGrey(obj)) {
160 : obj->Iterate(this);
161 : }
162 : }
163 : }
164 :
165 : class FullMarkingVerifier : public MarkingVerifier {
166 : public:
167 : explicit FullMarkingVerifier(Heap* heap)
168 : : MarkingVerifier(heap),
169 : marking_state_(
170 : heap->mark_compact_collector()->non_atomic_marking_state()) {}
171 :
172 : void Run() override {
173 : VerifyRoots(VISIT_ONLY_STRONG);
174 : VerifyMarking(heap_->new_space());
175 : VerifyMarking(heap_->new_lo_space());
176 : VerifyMarking(heap_->old_space());
177 : VerifyMarking(heap_->code_space());
178 : VerifyMarking(heap_->map_space());
179 : VerifyMarking(heap_->lo_space());
180 : VerifyMarking(heap_->code_lo_space());
181 : }
182 :
183 : protected:
184 : Bitmap* bitmap(const MemoryChunk* chunk) override {
185 : return marking_state_->bitmap(chunk);
186 : }
187 :
188 : bool IsMarked(HeapObject object) override {
189 : return marking_state_->IsBlack(object);
190 : }
191 :
192 : bool IsBlackOrGrey(HeapObject object) override {
193 : return marking_state_->IsBlackOrGrey(object);
194 : }
195 :
196 : void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
197 : VerifyPointersImpl(start, end);
198 : }
199 :
200 : void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
201 : VerifyPointersImpl(start, end);
202 : }
203 :
204 : void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
205 : VerifyPointersImpl(start, end);
206 : }
207 :
208 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
209 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
210 : VerifyHeapObjectImpl(target);
211 : }
212 :
213 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
214 : DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
215 : if (!host->IsWeakObject(rinfo->target_object())) {
216 : HeapObject object = rinfo->target_object();
217 : VerifyHeapObjectImpl(object);
218 : }
219 : }
220 :
221 : private:
222 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
223 : CHECK(marking_state_->IsBlackOrGrey(heap_object));
224 : }
225 :
226 : template <typename TSlot>
227 : V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
228 : for (TSlot slot = start; slot < end; ++slot) {
229 : typename TSlot::TObject object = *slot;
230 : HeapObject heap_object;
231 : if (object.GetHeapObjectIfStrong(&heap_object)) {
232 : VerifyHeapObjectImpl(heap_object);
233 : }
234 : }
235 : }
236 :
237 : MarkCompactCollector::NonAtomicMarkingState* marking_state_;
238 : };
239 :
240 : class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
241 : public:
242 : virtual void Run() = 0;
243 :
244 : void VisitPointers(HeapObject host, ObjectSlot start,
245 : ObjectSlot end) override {
246 : VerifyPointers(start, end);
247 : }
248 :
249 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
250 : MaybeObjectSlot end) override {
251 : VerifyPointers(start, end);
252 : }
253 :
254 : void VisitRootPointers(Root root, const char* description,
255 : FullObjectSlot start, FullObjectSlot end) override {
256 : VerifyRootPointers(start, end);
257 : }
258 :
259 : protected:
260 : explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
261 :
262 : inline Heap* heap() { return heap_; }
263 :
264 : virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
265 : virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
266 : virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
267 :
268 : void VerifyRoots(VisitMode mode);
269 : void VerifyEvacuationOnPage(Address start, Address end);
270 : void VerifyEvacuation(NewSpace* new_space);
271 : void VerifyEvacuation(PagedSpace* paged_space);
272 :
273 : Heap* heap_;
274 : };
275 :
276 : void EvacuationVerifier::VerifyRoots(VisitMode mode) {
277 : heap_->IterateStrongRoots(this, mode);
278 : }
279 :
280 : void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
281 : Address current = start;
282 : while (current < end) {
283 : HeapObject object = HeapObject::FromAddress(current);
284 : if (!object->IsFiller()) object->Iterate(this);
285 : current += object->Size();
286 : }
287 : }
288 :
289 : void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
290 : PageRange range(space->first_allocatable_address(), space->top());
291 : for (auto it = range.begin(); it != range.end();) {
292 : Page* page = *(it++);
293 : Address current = page->area_start();
294 : Address limit = it != range.end() ? page->area_end() : space->top();
295 : CHECK(limit == space->top() || !page->Contains(space->top()));
296 : VerifyEvacuationOnPage(current, limit);
297 : }
298 : }
299 :
300 : void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
301 : for (Page* p : *space) {
302 : if (p->IsEvacuationCandidate()) continue;
303 : if (p->Contains(space->top())) {
304 : CodePageMemoryModificationScope memory_modification_scope(p);
305 : heap_->CreateFillerObjectAt(
306 : space->top(), static_cast<int>(space->limit() - space->top()),
307 : ClearRecordedSlots::kNo);
308 : }
309 : VerifyEvacuationOnPage(p->area_start(), p->area_end());
310 : }
311 : }
312 :
313 : class FullEvacuationVerifier : public EvacuationVerifier {
314 : public:
315 : explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
316 :
317 : void Run() override {
318 : VerifyRoots(VISIT_ALL);
319 : VerifyEvacuation(heap_->new_space());
320 : VerifyEvacuation(heap_->old_space());
321 : VerifyEvacuation(heap_->code_space());
322 : VerifyEvacuation(heap_->map_space());
323 : }
324 :
325 : protected:
326 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
327 : CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
328 : Heap::InToPage(heap_object));
329 : CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
330 : }
331 :
332 : template <typename TSlot>
333 : void VerifyPointersImpl(TSlot start, TSlot end) {
334 : for (TSlot current = start; current < end; ++current) {
335 : typename TSlot::TObject object = *current;
336 : HeapObject heap_object;
337 : if (object.GetHeapObjectIfStrong(&heap_object)) {
338 : VerifyHeapObjectImpl(heap_object);
339 : }
340 : }
341 : }
342 :
343 : void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
344 : VerifyPointersImpl(start, end);
345 : }
346 : void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
347 : VerifyPointersImpl(start, end);
348 : }
349 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
350 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
351 : VerifyHeapObjectImpl(target);
352 : }
353 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
354 : VerifyHeapObjectImpl(rinfo->target_object());
355 : }
356 : void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
357 : VerifyPointersImpl(start, end);
358 : }
359 : };
360 :
361 : } // namespace
362 : #endif // VERIFY_HEAP
363 :
364 : // =============================================================================
365 : // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
366 : // =============================================================================
367 :
368 : using MarkCompactMarkingVisitor =
369 : MarkingVisitor<FixedArrayVisitationMode::kRegular,
370 : TraceRetainingPathMode::kEnabled,
371 : MarkCompactCollector::MarkingState>;
372 :
373 : namespace {
374 :
375 284998 : int NumberOfAvailableCores() {
376 284998 : static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
377 : // This number of cores should be greater than zero and never change.
378 : DCHECK_GE(num_cores, 1);
379 : DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
380 284998 : return num_cores;
381 : }
382 :
383 : } // namespace
384 :
385 62380 : int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
386 : DCHECK_GT(pages, 0);
387 : int tasks =
388 62380 : FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
389 62380 : if (!heap_->CanExpandOldGeneration(
390 62380 : static_cast<size_t>(tasks * Page::kPageSize))) {
391 : // Optimize for memory usage near the heap limit.
392 : tasks = 1;
393 : }
394 62380 : return tasks;
395 : }
396 :
397 148527 : int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
398 : int slots) {
399 : DCHECK_GT(pages, 0);
400 : // Limit the number of update tasks as task creation often dominates the
401 : // actual work that is being done.
402 : const int kMaxPointerUpdateTasks = 8;
403 : const int kSlotsPerTask = 600;
404 : const int wanted_tasks =
405 148527 : (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
406 : return FLAG_parallel_pointer_update
407 : ? Min(kMaxPointerUpdateTasks,
408 148359 : Min(NumberOfAvailableCores(), wanted_tasks))
409 297054 : : 1;
410 : }
411 :
412 0 : int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
413 : int pages) {
414 : DCHECK_GT(pages, 0);
415 : // No cap needed because all pages we need to process are fully filled with
416 : // interesting objects.
417 74426 : return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
418 74510 : : 1;
419 : }
420 :
421 61048 : MarkCompactCollector::MarkCompactCollector(Heap* heap)
422 : : MarkCompactCollectorBase(heap),
423 : page_parallel_job_semaphore_(0),
424 : #ifdef DEBUG
425 : state_(IDLE),
426 : #endif
427 : was_marked_incrementally_(false),
428 : evacuation_(false),
429 : compacting_(false),
430 : black_allocation_(false),
431 : have_code_to_deoptimize_(false),
432 : marking_worklist_(heap),
433 122097 : sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
434 61049 : old_to_new_slots_ = -1;
435 61049 : }
436 :
437 244136 : MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
438 :
439 61049 : void MarkCompactCollector::SetUp() {
440 : DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
441 : DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
442 : DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
443 : DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
444 61049 : }
445 :
446 61034 : void MarkCompactCollector::TearDown() {
447 61034 : AbortCompaction();
448 61033 : AbortWeakObjects();
449 122068 : if (heap()->incremental_marking()->IsMarking()) {
450 5300 : marking_worklist()->Clear();
451 : }
452 61034 : }
453 :
454 0 : void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
455 : DCHECK(!p->NeverEvacuate());
456 10545 : p->MarkEvacuationCandidate();
457 10545 : evacuation_candidates_.push_back(p);
458 0 : }
459 :
460 :
461 0 : static void TraceFragmentation(PagedSpace* space) {
462 0 : int number_of_pages = space->CountTotalPages();
463 0 : intptr_t reserved = (number_of_pages * space->AreaSize());
464 0 : intptr_t free = reserved - space->SizeOfObjects();
465 : PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
466 0 : static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
467 0 : }
468 :
469 79810 : bool MarkCompactCollector::StartCompaction() {
470 79810 : if (!compacting_) {
471 : DCHECK(evacuation_candidates_.empty());
472 :
473 159620 : CollectEvacuationCandidates(heap()->old_space());
474 :
475 79810 : if (FLAG_compact_code_space) {
476 79810 : CollectEvacuationCandidates(heap()->code_space());
477 0 : } else if (FLAG_trace_fragmentation) {
478 0 : TraceFragmentation(heap()->code_space());
479 : }
480 :
481 79810 : if (FLAG_trace_fragmentation) {
482 0 : TraceFragmentation(heap()->map_space());
483 : }
484 :
485 79810 : compacting_ = !evacuation_candidates_.empty();
486 : }
487 :
488 79810 : return compacting_;
489 : }
490 :
491 74510 : void MarkCompactCollector::CollectGarbage() {
492 : // Make sure that Prepare() has been called. The individual steps below will
493 : // update the state as they proceed.
494 : DCHECK(state_ == PREPARE_GC);
495 :
496 : #ifdef ENABLE_MINOR_MC
497 74510 : heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
498 : #endif // ENABLE_MINOR_MC
499 :
500 74510 : MarkLiveObjects();
501 74510 : ClearNonLiveReferences();
502 74510 : VerifyMarking();
503 :
504 74510 : RecordObjectStats();
505 :
506 74510 : StartSweepSpaces();
507 :
508 74510 : Evacuate();
509 :
510 74510 : Finish();
511 74510 : }
512 :
513 : #ifdef VERIFY_HEAP
514 : void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
515 : HeapObjectIterator iterator(space);
516 : for (HeapObject object = iterator.Next(); !object.is_null();
517 : object = iterator.Next()) {
518 : CHECK(non_atomic_marking_state()->IsBlack(object));
519 : }
520 : }
521 :
522 : void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
523 : for (Page* p : *space) {
524 : CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
525 : CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
526 : }
527 : }
528 :
529 :
530 : void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
531 : for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
532 : CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
533 : CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
534 : }
535 : }
536 :
537 : void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
538 : LargeObjectIterator it(space);
539 : for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
540 : CHECK(non_atomic_marking_state()->IsWhite(obj));
541 : CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
542 : MemoryChunk::FromHeapObject(obj)));
543 : }
544 : }
545 :
546 : void MarkCompactCollector::VerifyMarkbitsAreClean() {
547 : VerifyMarkbitsAreClean(heap_->old_space());
548 : VerifyMarkbitsAreClean(heap_->code_space());
549 : VerifyMarkbitsAreClean(heap_->map_space());
550 : VerifyMarkbitsAreClean(heap_->new_space());
551 : // Read-only space should always be black since we never collect any objects
552 : // in it or linked from it.
553 : VerifyMarkbitsAreDirty(heap_->read_only_space());
554 : VerifyMarkbitsAreClean(heap_->lo_space());
555 : VerifyMarkbitsAreClean(heap_->code_lo_space());
556 : VerifyMarkbitsAreClean(heap_->new_lo_space());
557 : }
558 :
559 : #endif // VERIFY_HEAP
560 :
561 174960 : void MarkCompactCollector::EnsureSweepingCompleted() {
562 349920 : if (!sweeper()->sweeping_in_progress()) return;
563 :
564 74510 : sweeper()->EnsureCompleted();
565 149020 : heap()->old_space()->RefillFreeList();
566 74510 : heap()->code_space()->RefillFreeList();
567 74510 : heap()->map_space()->RefillFreeList();
568 :
569 : #ifdef VERIFY_HEAP
570 : if (FLAG_verify_heap && !evacuation()) {
571 : FullEvacuationVerifier verifier(heap());
572 : verifier.Run();
573 : }
574 : #endif
575 : }
576 :
577 157052 : void MarkCompactCollector::ComputeEvacuationHeuristics(
578 : size_t area_size, int* target_fragmentation_percent,
579 : size_t* max_evacuated_bytes) {
580 : // For memory reducing and optimize for memory mode we directly define both
581 : // constants.
582 : const int kTargetFragmentationPercentForReduceMemory = 20;
583 : const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
584 : const int kTargetFragmentationPercentForOptimizeMemory = 20;
585 : const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
586 :
587 : // For regular mode (which is latency critical) we define less aggressive
588 : // defaults to start and switch to a trace-based (using compaction speed)
589 : // approach as soon as we have enough samples.
590 : const int kTargetFragmentationPercent = 70;
591 : const size_t kMaxEvacuatedBytes = 4 * MB;
592 : // Time to take for a single area (=payload of page). Used as soon as there
593 : // exist enough compaction speed samples.
594 : const float kTargetMsPerArea = .5;
595 :
596 447342 : if (heap()->ShouldReduceMemory()) {
597 23794 : *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
598 23794 : *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
599 133258 : } else if (heap()->ShouldOptimizeForMemoryUsage()) {
600 : *target_fragmentation_percent =
601 20 : kTargetFragmentationPercentForOptimizeMemory;
602 20 : *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
603 : } else {
604 : const double estimated_compaction_speed =
605 133238 : heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
606 133238 : if (estimated_compaction_speed != 0) {
607 : // Estimate the target fragmentation based on traced compaction speed
608 : // and a goal for a single page.
609 : const double estimated_ms_per_area =
610 101822 : 1 + area_size / estimated_compaction_speed;
611 : *target_fragmentation_percent = static_cast<int>(
612 101822 : 100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
613 101822 : if (*target_fragmentation_percent <
614 : kTargetFragmentationPercentForReduceMemory) {
615 : *target_fragmentation_percent =
616 0 : kTargetFragmentationPercentForReduceMemory;
617 : }
618 : } else {
619 31416 : *target_fragmentation_percent = kTargetFragmentationPercent;
620 : }
621 133238 : *max_evacuated_bytes = kMaxEvacuatedBytes;
622 : }
623 157052 : }
624 :
625 319240 : void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
626 : DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
627 :
628 159620 : int number_of_pages = space->CountTotalPages();
629 159620 : size_t area_size = space->AreaSize();
630 :
631 : // Pairs of (live_bytes_in_page, page).
632 : typedef std::pair<size_t, Page*> LiveBytesPagePair;
633 : std::vector<LiveBytesPagePair> pages;
634 159620 : pages.reserve(number_of_pages);
635 :
636 : DCHECK(!sweeping_in_progress());
637 : Page* owner_of_linear_allocation_area =
638 : space->top() == space->limit()
639 : ? nullptr
640 159620 : : Page::FromAllocationAreaAddress(space->top());
641 494966 : for (Page* p : *space) {
642 585152 : if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
643 : !p->CanAllocate())
644 : continue;
645 : // Invariant: Evacuation candidates are just created when marking is
646 : // started. This means that sweeping has finished. Furthermore, at the end
647 : // of a GC all evacuation candidates are cleared and their slot buffers are
648 : // released.
649 124903 : CHECK(!p->IsEvacuationCandidate());
650 124903 : CHECK_NULL(p->slot_set<OLD_TO_OLD>());
651 124903 : CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
652 124903 : CHECK(p->SweepingDone());
653 : DCHECK(p->area_size() == area_size);
654 249806 : pages.push_back(std::make_pair(p->allocated_bytes(), p));
655 : }
656 :
657 : int candidate_count = 0;
658 : size_t total_live_bytes = 0;
659 :
660 159620 : const bool reduce_memory = heap()->ShouldReduceMemory();
661 159620 : if (FLAG_manual_evacuation_candidates_selection) {
662 1074 : for (size_t i = 0; i < pages.size(); i++) {
663 333 : Page* p = pages[i].second;
664 333 : if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
665 144 : candidate_count++;
666 144 : total_live_bytes += pages[i].first;
667 : p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
668 : AddEvacuationCandidate(p);
669 : }
670 : }
671 159212 : } else if (FLAG_stress_compaction_random) {
672 0 : double fraction = isolate()->fuzzer_rng()->NextDouble();
673 : size_t pages_to_mark_count =
674 0 : static_cast<size_t>(fraction * (pages.size() + 1));
675 0 : for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
676 0 : pages.size(), pages_to_mark_count)) {
677 0 : candidate_count++;
678 0 : total_live_bytes += pages[i].first;
679 0 : AddEvacuationCandidate(pages[i].second);
680 : }
681 159212 : } else if (FLAG_stress_compaction) {
682 11784 : for (size_t i = 0; i < pages.size(); i++) {
683 4812 : Page* p = pages[i].second;
684 4812 : if (i % 2 == 0) {
685 2973 : candidate_count++;
686 2973 : total_live_bytes += pages[i].first;
687 : AddEvacuationCandidate(p);
688 : }
689 : }
690 : } else {
691 : // The following approach determines the pages that should be evacuated.
692 : //
693 : // We use two conditions to decide whether a page qualifies as an evacuation
694 : // candidate, or not:
695 : // * Target fragmentation: How fragmented is a page, i.e., how is the ratio
696 : // between live bytes and capacity of this page (= area).
697 : // * Evacuation quota: A global quota determining how much bytes should be
698 : // compacted.
699 : //
700 : // The algorithm sorts all pages by live bytes and then iterates through
701 : // them starting with the page with the most free memory, adding them to the
702 : // set of evacuation candidates as long as both conditions (fragmentation
703 : // and quota) hold.
704 : size_t max_evacuated_bytes;
705 : int target_fragmentation_percent;
706 : ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
707 157052 : &max_evacuated_bytes);
708 :
709 : const size_t free_bytes_threshold =
710 157052 : target_fragmentation_percent * (area_size / 100);
711 :
712 : // Sort pages from the most free to the least free, then select
713 : // the first n pages for evacuation such that:
714 : // - the total size of evacuated objects does not exceed the specified
715 : // limit.
716 : // - fragmentation of (n+1)-th page does not exceed the specified limit.
717 : std::sort(pages.begin(), pages.end(),
718 : [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
719 : return a.first < b.first;
720 157052 : });
721 553620 : for (size_t i = 0; i < pages.size(); i++) {
722 119758 : size_t live_bytes = pages[i].first;
723 : DCHECK_GE(area_size, live_bytes);
724 119758 : size_t free_bytes = area_size - live_bytes;
725 119758 : if (FLAG_always_compact ||
726 43510 : ((free_bytes >= free_bytes_threshold) &&
727 43510 : ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
728 43560 : candidate_count++;
729 43560 : total_live_bytes += live_bytes;
730 : }
731 119758 : if (FLAG_trace_fragmentation_verbose) {
732 : PrintIsolate(isolate(),
733 : "compaction-selection-page: space=%s free_bytes_page=%zu "
734 : "fragmentation_limit_kb=%" PRIuS
735 : " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
736 : "compaction_limit_kb=%zu\n",
737 : space->name(), free_bytes / KB, free_bytes_threshold / KB,
738 : target_fragmentation_percent, total_live_bytes / KB,
739 0 : max_evacuated_bytes / KB);
740 : }
741 : }
742 : // How many pages we will allocated for the evacuated objects
743 : // in the worst case: ceil(total_live_bytes / area_size)
744 : int estimated_new_pages =
745 157052 : static_cast<int>((total_live_bytes + area_size - 1) / area_size);
746 : DCHECK_LE(estimated_new_pages, candidate_count);
747 : int estimated_released_pages = candidate_count - estimated_new_pages;
748 : // Avoid (compact -> expand) cycles.
749 157052 : if ((estimated_released_pages == 0) && !FLAG_always_compact) {
750 : candidate_count = 0;
751 : }
752 164480 : for (int i = 0; i < candidate_count; i++) {
753 14856 : AddEvacuationCandidate(pages[i].second);
754 : }
755 : }
756 :
757 159620 : if (FLAG_trace_fragmentation) {
758 : PrintIsolate(isolate(),
759 : "compaction-selection: space=%s reduce_memory=%d pages=%d "
760 : "total_live_bytes=%zu\n",
761 : space->name(), reduce_memory, candidate_count,
762 0 : total_live_bytes / KB);
763 : }
764 159620 : }
765 :
766 :
767 61034 : void MarkCompactCollector::AbortCompaction() {
768 61034 : if (compacting_) {
769 35 : RememberedSet<OLD_TO_OLD>::ClearAll(heap());
770 148 : for (Page* p : evacuation_candidates_) {
771 : p->ClearEvacuationCandidate();
772 : }
773 35 : compacting_ = false;
774 : evacuation_candidates_.clear();
775 : }
776 : DCHECK(evacuation_candidates_.empty());
777 61034 : }
778 :
779 :
780 74510 : void MarkCompactCollector::Prepare() {
781 500258 : was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
782 :
783 : #ifdef DEBUG
784 : DCHECK(state_ == IDLE);
785 : state_ = PREPARE_GC;
786 : #endif
787 :
788 : DCHECK(!FLAG_never_compact || !FLAG_always_compact);
789 :
790 : // Instead of waiting we could also abort the sweeper threads here.
791 74510 : EnsureSweepingCompleted();
792 :
793 74510 : if (heap()->incremental_marking()->IsSweeping()) {
794 3552 : heap()->incremental_marking()->Stop();
795 : }
796 :
797 74510 : heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
798 :
799 74510 : if (!was_marked_incrementally_) {
800 212792 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
801 159594 : heap_->local_embedder_heap_tracer()->TracePrologue();
802 : }
803 :
804 : // Don't start compaction if we are in the middle of incremental
805 : // marking cycle. We did not collect any slots.
806 74510 : if (!FLAG_never_compact && !was_marked_incrementally_) {
807 53198 : StartCompaction();
808 : }
809 :
810 : PagedSpaces spaces(heap());
811 298040 : for (PagedSpace* space = spaces.next(); space != nullptr;
812 : space = spaces.next()) {
813 223530 : space->PrepareForMarkCompact();
814 : }
815 : heap()->account_external_memory_concurrently_freed();
816 :
817 : #ifdef VERIFY_HEAP
818 : if (!was_marked_incrementally_ && FLAG_verify_heap) {
819 : VerifyMarkbitsAreClean();
820 : }
821 : #endif
822 74510 : }
823 :
824 223566 : void MarkCompactCollector::FinishConcurrentMarking(
825 : ConcurrentMarking::StopRequest stop_request) {
826 : // FinishConcurrentMarking is called for both, concurrent and parallel,
827 : // marking. It is safe to call this function when tasks are already finished.
828 223566 : if (FLAG_parallel_marking || FLAG_concurrent_marking) {
829 663174 : heap()->concurrent_marking()->Stop(stop_request);
830 : heap()->concurrent_marking()->FlushMemoryChunkData(
831 442116 : non_atomic_marking_state());
832 : }
833 223566 : }
834 :
835 74510 : void MarkCompactCollector::VerifyMarking() {
836 74510 : CHECK(marking_worklist()->IsEmpty());
837 : DCHECK(heap_->incremental_marking()->IsStopped());
838 : #ifdef VERIFY_HEAP
839 : if (FLAG_verify_heap) {
840 : FullMarkingVerifier verifier(heap());
841 : verifier.Run();
842 : }
843 : #endif
844 : #ifdef VERIFY_HEAP
845 : if (FLAG_verify_heap) {
846 : heap()->old_space()->VerifyLiveBytes();
847 : heap()->map_space()->VerifyLiveBytes();
848 : heap()->code_space()->VerifyLiveBytes();
849 : }
850 : #endif
851 74510 : }
852 :
853 223530 : void MarkCompactCollector::Finish() {
854 372550 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
855 :
856 74510 : epoch_++;
857 :
858 : #ifdef DEBUG
859 : heap()->VerifyCountersBeforeConcurrentSweeping();
860 : #endif
861 :
862 74510 : CHECK(weak_objects_.current_ephemerons.IsEmpty());
863 74510 : CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
864 74510 : weak_objects_.next_ephemerons.Clear();
865 :
866 74510 : sweeper()->StartSweeperTasks();
867 74510 : sweeper()->StartIterabilityTasks();
868 :
869 : // Clear the marking state of live large objects.
870 149020 : heap_->lo_space()->ClearMarkingStateOfLiveObjects();
871 149020 : heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
872 :
873 : #ifdef DEBUG
874 : DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
875 : state_ = IDLE;
876 : #endif
877 149020 : heap_->isolate()->inner_pointer_to_code_cache()->Flush();
878 :
879 : // The stub caches are not traversed during GC; clear them to force
880 : // their lazy re-initialization. This must be done after the
881 : // GC, because it relies on the new address of certain old space
882 : // objects (empty string, illegal builtin).
883 74510 : isolate()->load_stub_cache()->Clear();
884 74510 : isolate()->store_stub_cache()->Clear();
885 :
886 74510 : if (have_code_to_deoptimize_) {
887 : // Some code objects were marked for deoptimization during the GC.
888 57 : Deoptimizer::DeoptimizeMarkedCode(isolate());
889 57 : have_code_to_deoptimize_ = false;
890 74510 : }
891 74510 : }
892 :
893 74510 : class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
894 : public:
895 : explicit RootMarkingVisitor(MarkCompactCollector* collector)
896 74510 : : collector_(collector) {}
897 :
898 228738734 : void VisitRootPointer(Root root, const char* description,
899 : FullObjectSlot p) final {
900 : MarkObjectByPointer(root, p);
901 228738748 : }
902 :
903 1780724 : void VisitRootPointers(Root root, const char* description,
904 : FullObjectSlot start, FullObjectSlot end) final {
905 36326838 : for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
906 1780724 : }
907 :
908 : private:
909 : V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
910 523008199 : if (!(*p)->IsHeapObject()) return;
911 :
912 516495162 : collector_->MarkRootObject(root, HeapObject::cast(*p));
913 : }
914 :
915 : MarkCompactCollector* const collector_;
916 : };
917 :
918 : // This visitor is used to visit the body of special objects held alive by
919 : // other roots.
920 : //
921 : // It is currently used for
922 : // - Code held alive by the top optimized frame. This code cannot be deoptimized
923 : // and thus have to be kept alive in an isolate way, i.e., it should not keep
924 : // alive other code objects reachable through the weak list but they should
925 : // keep alive its embedded pointers (which would otherwise be dropped).
926 : // - Prefix of the string table.
927 74510 : class MarkCompactCollector::CustomRootBodyMarkingVisitor final
928 : : public ObjectVisitor {
929 : public:
930 : explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
931 74510 : : collector_(collector) {}
932 :
933 0 : void VisitPointer(HeapObject host, ObjectSlot p) final {
934 : MarkObject(host, *p);
935 0 : }
936 :
937 75523 : void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
938 527196 : for (ObjectSlot p = start; p < end; ++p) {
939 : DCHECK(!HasWeakHeapObjectTag(*p));
940 : MarkObject(host, *p);
941 : }
942 75523 : }
943 :
944 0 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
945 : MaybeObjectSlot end) final {
946 : // At the moment, custom roots cannot contain weak pointers.
947 0 : UNREACHABLE();
948 : }
949 :
950 : // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
951 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
952 0 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
953 : MarkObject(host, target);
954 0 : }
955 7770 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
956 : MarkObject(host, rinfo->target_object());
957 7770 : }
958 :
959 : private:
960 : V8_INLINE void MarkObject(HeapObject host, Object object) {
961 767840 : if (!object->IsHeapObject()) return;
962 87688 : collector_->MarkObject(host, HeapObject::cast(object));
963 : }
964 :
965 : MarkCompactCollector* const collector_;
966 : };
967 :
968 74510 : class InternalizedStringTableCleaner : public ObjectVisitor {
969 : public:
970 : InternalizedStringTableCleaner(Heap* heap, HeapObject table)
971 74510 : : heap_(heap), pointers_removed_(0), table_(table) {}
972 :
973 74510 : void VisitPointers(HeapObject host, ObjectSlot start,
974 : ObjectSlot end) override {
975 : // Visit all HeapObject pointers in [start, end).
976 74510 : Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
977 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
978 : heap_->mark_compact_collector()->non_atomic_marking_state();
979 192634396 : for (ObjectSlot p = start; p < end; ++p) {
980 192485376 : Object o = *p;
981 192485376 : if (o->IsHeapObject()) {
982 : HeapObject heap_object = HeapObject::cast(o);
983 192485376 : if (marking_state->IsWhite(heap_object)) {
984 4988526 : pointers_removed_++;
985 : // Set the entry to the_hole_value (as deleted).
986 : p.store(the_hole);
987 : } else {
988 : // StringTable contains only old space strings.
989 : DCHECK(!Heap::InYoungGeneration(o));
990 : MarkCompactCollector::RecordSlot(table_, p, heap_object);
991 : }
992 : }
993 : }
994 74510 : }
995 :
996 0 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
997 : MaybeObjectSlot end) final {
998 0 : UNREACHABLE();
999 : }
1000 :
1001 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
1002 :
1003 0 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
1004 0 : UNREACHABLE();
1005 : }
1006 :
1007 : int PointersRemoved() {
1008 : return pointers_removed_;
1009 : }
1010 :
1011 : private:
1012 : Heap* heap_;
1013 : int pointers_removed_;
1014 : HeapObject table_;
1015 : };
1016 :
1017 74510 : class ExternalStringTableCleaner : public RootVisitor {
1018 : public:
1019 74510 : explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
1020 :
1021 74486 : void VisitRootPointers(Root root, const char* description,
1022 : FullObjectSlot start, FullObjectSlot end) override {
1023 : // Visit all HeapObject pointers in [start, end).
1024 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
1025 : heap_->mark_compact_collector()->non_atomic_marking_state();
1026 74486 : Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
1027 260000 : for (FullObjectSlot p = start; p < end; ++p) {
1028 111028 : Object o = *p;
1029 111028 : if (o->IsHeapObject()) {
1030 : HeapObject heap_object = HeapObject::cast(o);
1031 111028 : if (marking_state->IsWhite(heap_object)) {
1032 1567 : if (o->IsExternalString()) {
1033 1567 : heap_->FinalizeExternalString(String::cast(o));
1034 : } else {
1035 : // The original external string may have been internalized.
1036 : DCHECK(o->IsThinString());
1037 : }
1038 : // Set the entry to the_hole_value (as deleted).
1039 : p.store(the_hole);
1040 : }
1041 : }
1042 : }
1043 74486 : }
1044 :
1045 : private:
1046 : Heap* heap_;
1047 : };
1048 :
1049 : // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1050 : // are retained.
1051 74510 : class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1052 : public:
1053 : explicit MarkCompactWeakObjectRetainer(
1054 : MarkCompactCollector::NonAtomicMarkingState* marking_state)
1055 74510 : : marking_state_(marking_state) {}
1056 :
1057 3999599 : Object RetainAs(Object object) override {
1058 : HeapObject heap_object = HeapObject::cast(object);
1059 : DCHECK(!marking_state_->IsGrey(heap_object));
1060 3999599 : if (marking_state_->IsBlack(heap_object)) {
1061 3764243 : return object;
1062 383478 : } else if (object->IsAllocationSite() &&
1063 : !(AllocationSite::cast(object)->IsZombie())) {
1064 : // "dead" AllocationSites need to live long enough for a traversal of new
1065 : // space. These sites get a one-time reprieve.
1066 :
1067 77016 : Object nested = object;
1068 234346 : while (nested->IsAllocationSite()) {
1069 80314 : AllocationSite current_site = AllocationSite::cast(nested);
1070 : // MarkZombie will override the nested_site, read it first before
1071 : // marking
1072 80314 : nested = current_site->nested_site();
1073 80314 : current_site->MarkZombie();
1074 : marking_state_->WhiteToBlack(current_site);
1075 : }
1076 :
1077 77016 : return object;
1078 : } else {
1079 158340 : return Object();
1080 : }
1081 : }
1082 :
1083 : private:
1084 : MarkCompactCollector::NonAtomicMarkingState* marking_state_;
1085 : };
1086 :
1087 62380 : class RecordMigratedSlotVisitor : public ObjectVisitor {
1088 : public:
1089 : explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1090 62445 : : collector_(collector) {}
1091 :
1092 0 : inline void VisitPointer(HeapObject host, ObjectSlot p) final {
1093 : DCHECK(!HasWeakHeapObjectTag(*p));
1094 220752304 : RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
1095 0 : }
1096 :
1097 0 : inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
1098 71099984 : RecordMigratedSlot(host, *p, p.address());
1099 0 : }
1100 :
1101 784389 : inline void VisitPointers(HeapObject host, ObjectSlot start,
1102 : ObjectSlot end) final {
1103 122887198 : while (start < end) {
1104 : VisitPointer(host, start);
1105 : ++start;
1106 : }
1107 784309 : }
1108 :
1109 0 : inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
1110 : MaybeObjectSlot end) final {
1111 36816392 : while (start < end) {
1112 : VisitPointer(host, start);
1113 : ++start;
1114 : }
1115 0 : }
1116 :
1117 366 : inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1118 : DCHECK_EQ(host, rinfo->host());
1119 : DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
1120 366 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1121 : // The target is always in old space, we don't have to record the slot in
1122 : // the old-to-new remembered set.
1123 : DCHECK(!Heap::InYoungGeneration(target));
1124 366 : collector_->RecordRelocSlot(host, rinfo, target);
1125 366 : }
1126 :
1127 55865 : inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1128 : DCHECK_EQ(host, rinfo->host());
1129 : DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1130 55865 : HeapObject object = HeapObject::cast(rinfo->target_object());
1131 55865 : GenerationalBarrierForCode(host, rinfo, object);
1132 55865 : collector_->RecordRelocSlot(host, rinfo, object);
1133 55865 : }
1134 :
1135 : // Entries that are skipped for recording.
1136 0 : inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
1137 0 : inline void VisitExternalReference(Foreign host, Address* p) final {}
1138 36797 : inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
1139 0 : inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
1140 :
1141 : protected:
1142 145498247 : inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
1143 : Address slot) {
1144 145498247 : if (value->IsStrongOrWeak()) {
1145 108966470 : MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
1146 108966470 : if (p->InYoungGeneration()) {
1147 : DCHECK_IMPLIES(
1148 : p->IsToPage(),
1149 : p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
1150 : RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
1151 2737478 : MemoryChunk::FromHeapObject(host), slot);
1152 106228992 : } else if (p->IsEvacuationCandidate()) {
1153 : RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1154 13073914 : MemoryChunk::FromHeapObject(host), slot);
1155 : }
1156 : }
1157 145498686 : }
1158 :
1159 : MarkCompactCollector* collector_;
1160 : };
1161 :
1162 : class MigrationObserver {
1163 : public:
1164 62380 : explicit MigrationObserver(Heap* heap) : heap_(heap) {}
1165 :
1166 0 : virtual ~MigrationObserver() = default;
1167 : virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1168 : int size) = 0;
1169 :
1170 : protected:
1171 : Heap* heap_;
1172 : };
1173 :
1174 0 : class ProfilingMigrationObserver final : public MigrationObserver {
1175 : public:
1176 62380 : explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
1177 :
1178 440988 : inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1179 : int size) final {
1180 613076 : if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
1181 506 : PROFILE(heap_->isolate(),
1182 : CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
1183 : }
1184 440921 : heap_->OnMoveEvent(dst, src, size);
1185 440812 : }
1186 : };
1187 :
1188 309041 : class HeapObjectVisitor {
1189 : public:
1190 309041 : virtual ~HeapObjectVisitor() = default;
1191 : virtual bool Visit(HeapObject object, int size) = 0;
1192 : };
1193 :
1194 154508 : class EvacuateVisitorBase : public HeapObjectVisitor {
1195 : public:
1196 : void AddObserver(MigrationObserver* observer) {
1197 1444 : migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
1198 1444 : observers_.push_back(observer);
1199 : }
1200 :
1201 : protected:
1202 : enum MigrationMode { kFast, kObserved };
1203 :
1204 : typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject dst,
1205 : HeapObject src, int size,
1206 : AllocationSpace dest);
1207 :
1208 : template <MigrationMode mode>
1209 66725819 : static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
1210 : HeapObject src, int size, AllocationSpace dest) {
1211 : Address dst_addr = dst->address();
1212 : Address src_addr = src->address();
1213 : DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
1214 : DCHECK_NE(dest, LO_SPACE);
1215 : DCHECK_NE(dest, CODE_LO_SPACE);
1216 66725819 : if (dest == OLD_SPACE) {
1217 : DCHECK_OBJECT_SIZE(size);
1218 : DCHECK(IsAligned(size, kTaggedSize));
1219 : base->heap_->CopyBlock(dst_addr, src_addr, size);
1220 : if (mode != MigrationMode::kFast)
1221 : base->ExecuteMigrationObservers(dest, src, dst, size);
1222 43129534 : dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
1223 23834753 : } else if (dest == CODE_SPACE) {
1224 : DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
1225 : base->heap_->CopyBlock(dst_addr, src_addr, size);
1226 3836 : Code::cast(dst)->Relocate(dst_addr - src_addr);
1227 : if (mode != MigrationMode::kFast)
1228 : base->ExecuteMigrationObservers(dest, src, dst, size);
1229 1918 : dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
1230 : } else {
1231 : DCHECK_OBJECT_SIZE(size);
1232 : DCHECK(dest == NEW_SPACE);
1233 : base->heap_->CopyBlock(dst_addr, src_addr, size);
1234 : if (mode != MigrationMode::kFast)
1235 : base->ExecuteMigrationObservers(dest, src, dst, size);
1236 : }
1237 : src->set_map_word(MapWord::FromForwardingAddress(dst));
1238 67213795 : }
1239 :
1240 : EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
1241 : RecordMigratedSlotVisitor* record_visitor)
1242 : : heap_(heap),
1243 : local_allocator_(local_allocator),
1244 154508 : record_visitor_(record_visitor) {
1245 154508 : migration_function_ = RawMigrateObject<MigrationMode::kFast>;
1246 : }
1247 :
1248 43268372 : inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
1249 : int size, HeapObject* target_object) {
1250 : #ifdef VERIFY_HEAP
1251 : if (AbortCompactionForTesting(object)) return false;
1252 : #endif // VERIFY_HEAP
1253 : AllocationAlignment alignment =
1254 : HeapObject::RequiredAlignment(object->map());
1255 : AllocationResult allocation =
1256 43268372 : local_allocator_->Allocate(target_space, size, alignment);
1257 42906290 : if (allocation.To(target_object)) {
1258 : MigrateObject(*target_object, object, size, target_space);
1259 43409084 : return true;
1260 : }
1261 : return false;
1262 : }
1263 :
1264 : inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
1265 : HeapObject dst, int size) {
1266 1323064 : for (MigrationObserver* obs : observers_) {
1267 441087 : obs->Move(dest, src, dst, size);
1268 : }
1269 : }
1270 :
1271 : inline void MigrateObject(HeapObject dst, HeapObject src, int size,
1272 : AllocationSpace dest) {
1273 65840421 : migration_function_(this, dst, src, size, dest);
1274 : }
1275 :
1276 : #ifdef VERIFY_HEAP
1277 : bool AbortCompactionForTesting(HeapObject object) {
1278 : if (FLAG_stress_compaction) {
1279 : const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1280 : kPageAlignmentMask & ~kObjectAlignmentMask;
1281 : if ((object->ptr() & kPageAlignmentMask) == mask) {
1282 : Page* page = Page::FromHeapObject(object);
1283 : if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1284 : page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1285 : } else {
1286 : page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1287 : return true;
1288 : }
1289 : }
1290 : }
1291 : return false;
1292 : }
1293 : #endif // VERIFY_HEAP
1294 :
1295 : Heap* heap_;
1296 : LocalAllocator* local_allocator_;
1297 : RecordMigratedSlotVisitor* record_visitor_;
1298 : std::vector<MigrationObserver*> observers_;
1299 : MigrateFunction migration_function_;
1300 : };
1301 :
1302 154508 : class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
1303 : public:
1304 77254 : explicit EvacuateNewSpaceVisitor(
1305 : Heap* heap, LocalAllocator* local_allocator,
1306 : RecordMigratedSlotVisitor* record_visitor,
1307 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1308 : : EvacuateVisitorBase(heap, local_allocator, record_visitor),
1309 : buffer_(LocalAllocationBuffer::InvalidBuffer()),
1310 : promoted_size_(0),
1311 : semispace_copied_size_(0),
1312 : local_pretenuring_feedback_(local_pretenuring_feedback),
1313 231762 : is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
1314 :
1315 35108067 : inline bool Visit(HeapObject object, int size) override {
1316 35108067 : if (TryEvacuateWithoutCopy(object)) return true;
1317 33784091 : HeapObject target_object;
1318 43889743 : if (heap_->ShouldBePromoted(object->address()) &&
1319 10105107 : TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1320 10105765 : promoted_size_ += size;
1321 10105765 : return true;
1322 : }
1323 : heap_->UpdateAllocationSite(object->map(), object,
1324 47820172 : local_pretenuring_feedback_);
1325 23898308 : HeapObject target;
1326 23898308 : AllocationSpace space = AllocateTargetObject(object, size, &target);
1327 23831056 : MigrateObject(HeapObject::cast(target), object, size, space);
1328 23885276 : semispace_copied_size_ += size;
1329 23885276 : return true;
1330 : }
1331 :
1332 : intptr_t promoted_size() { return promoted_size_; }
1333 : intptr_t semispace_copied_size() { return semispace_copied_size_; }
1334 :
1335 : private:
1336 35112921 : inline bool TryEvacuateWithoutCopy(HeapObject object) {
1337 35112921 : if (is_incremental_marking_) return false;
1338 :
1339 : Map map = object->map();
1340 :
1341 : // Some objects can be evacuated without creating a copy.
1342 35115009 : if (map->visitor_id() == kVisitThinString) {
1343 : HeapObject actual = ThinString::cast(object)->unchecked_actual();
1344 1374381 : if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
1345 : object->map_slot().Relaxed_Store(
1346 : MapWord::FromForwardingAddress(actual).ToMap());
1347 1371164 : return true;
1348 : }
1349 : // TODO(mlippautz): Handle ConsString.
1350 :
1351 : return false;
1352 : }
1353 :
1354 23897167 : inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
1355 2049 : HeapObject* target_object) {
1356 : AllocationAlignment alignment =
1357 : HeapObject::RequiredAlignment(old_object->map());
1358 : AllocationSpace space_allocated_in = NEW_SPACE;
1359 : AllocationResult allocation =
1360 23897167 : local_allocator_->Allocate(NEW_SPACE, size, alignment);
1361 23861558 : if (allocation.IsRetry()) {
1362 2049 : allocation = AllocateInOldSpace(size, alignment);
1363 : space_allocated_in = OLD_SPACE;
1364 : }
1365 23861558 : bool ok = allocation.To(target_object);
1366 : DCHECK(ok);
1367 : USE(ok);
1368 23849926 : return space_allocated_in;
1369 : }
1370 :
1371 2049 : inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1372 : AllocationAlignment alignment) {
1373 : AllocationResult allocation =
1374 2049 : local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
1375 2049 : if (allocation.IsRetry()) {
1376 : heap_->FatalProcessOutOfMemory(
1377 0 : "MarkCompactCollector: semi-space copy, fallback in old gen");
1378 : }
1379 2049 : return allocation;
1380 : }
1381 :
1382 : LocalAllocationBuffer buffer_;
1383 : intptr_t promoted_size_;
1384 : intptr_t semispace_copied_size_;
1385 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1386 : bool is_incremental_marking_;
1387 : };
1388 :
1389 : template <PageEvacuationMode mode>
1390 154508 : class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
1391 : public:
1392 : explicit EvacuateNewSpacePageVisitor(
1393 : Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1394 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1395 : : heap_(heap),
1396 : record_visitor_(record_visitor),
1397 : moved_bytes_(0),
1398 154508 : local_pretenuring_feedback_(local_pretenuring_feedback) {}
1399 :
1400 2367 : static void Move(Page* page) {
1401 : switch (mode) {
1402 : case NEW_TO_NEW:
1403 2367 : page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1404 : page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1405 : break;
1406 : case NEW_TO_OLD: {
1407 588 : page->heap()->new_space()->from_space().RemovePage(page);
1408 588 : Page* new_page = Page::ConvertNewToOld(page);
1409 : DCHECK(!new_page->InYoungGeneration());
1410 : new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1411 : break;
1412 : }
1413 : }
1414 2367 : }
1415 :
1416 5996107 : inline bool Visit(HeapObject object, int size) override {
1417 : if (mode == NEW_TO_NEW) {
1418 5996107 : heap_->UpdateAllocationSite(object->map(), object,
1419 11992214 : local_pretenuring_feedback_);
1420 : } else if (mode == NEW_TO_OLD) {
1421 2598670 : object->IterateBodyFast(record_visitor_);
1422 : }
1423 5981076 : return true;
1424 : }
1425 :
1426 : intptr_t moved_bytes() { return moved_bytes_; }
1427 2367 : void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1428 :
1429 : private:
1430 : Heap* heap_;
1431 : RecordMigratedSlotVisitor* record_visitor_;
1432 : intptr_t moved_bytes_;
1433 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1434 : };
1435 :
1436 77254 : class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
1437 : public:
1438 : EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
1439 : RecordMigratedSlotVisitor* record_visitor)
1440 77254 : : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
1441 :
1442 33967935 : inline bool Visit(HeapObject object, int size) override {
1443 33967935 : HeapObject target_object;
1444 33967935 : if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
1445 33967935 : object, size, &target_object)) {
1446 : DCHECK(object->map_word().IsForwardingAddress());
1447 : return true;
1448 : }
1449 25 : return false;
1450 : }
1451 : };
1452 :
1453 25 : class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
1454 : public:
1455 25 : explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
1456 :
1457 0 : inline bool Visit(HeapObject object, int size) override {
1458 65 : RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1459 65 : object->IterateBodyFast(&visitor);
1460 0 : return true;
1461 : }
1462 :
1463 : private:
1464 : Heap* heap_;
1465 : };
1466 :
1467 10328074 : bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
1468 10328074 : Object o = *p;
1469 10328074 : if (!o->IsHeapObject()) return false;
1470 : HeapObject heap_object = HeapObject::cast(o);
1471 : return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
1472 10328074 : heap_object);
1473 : }
1474 :
1475 74510 : void MarkCompactCollector::MarkStringTable(
1476 : ObjectVisitor* custom_root_body_visitor) {
1477 74510 : StringTable string_table = heap()->string_table();
1478 : // Mark the string table itself.
1479 74510 : if (marking_state()->WhiteToBlack(string_table)) {
1480 : // Explicitly mark the prefix.
1481 74058 : string_table->IteratePrefix(custom_root_body_visitor);
1482 : }
1483 74510 : }
1484 :
1485 74510 : void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
1486 : ObjectVisitor* custom_root_body_visitor) {
1487 : // Mark the heap roots including global variables, stack variables,
1488 : // etc., and all objects reachable from them.
1489 74510 : heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);
1490 :
1491 : // Custom marking for string table and top optimized frame.
1492 74510 : MarkStringTable(custom_root_body_visitor);
1493 74510 : ProcessTopOptimizedFrame(custom_root_body_visitor);
1494 74510 : }
1495 :
1496 149020 : void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
1497 : bool work_to_do = true;
1498 : int iterations = 0;
1499 149020 : int max_iterations = FLAG_ephemeron_fixpoint_iterations;
1500 :
1501 447096 : while (work_to_do) {
1502 149056 : PerformWrapperTracing();
1503 :
1504 149056 : if (iterations >= max_iterations) {
1505 : // Give up fixpoint iteration and switch to linear algorithm.
1506 0 : ProcessEphemeronsLinear();
1507 0 : break;
1508 : }
1509 :
1510 : // Move ephemerons from next_ephemerons into current_ephemerons to
1511 : // drain them in this iteration.
1512 149056 : weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1513 596178 : heap()->concurrent_marking()->set_ephemeron_marked(false);
1514 :
1515 : {
1516 596224 : TRACE_GC(heap()->tracer(),
1517 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1518 :
1519 149056 : if (FLAG_parallel_marking) {
1520 294768 : heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1521 : }
1522 :
1523 149056 : work_to_do = ProcessEphemerons();
1524 : FinishConcurrentMarking(
1525 298112 : ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
1526 : }
1527 :
1528 149056 : CHECK(weak_objects_.current_ephemerons.IsEmpty());
1529 149056 : CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1530 :
1531 298092 : work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
1532 149020 : heap()->concurrent_marking()->ephemeron_marked() ||
1533 447096 : !marking_worklist()->IsEmbedderEmpty() ||
1534 149020 : !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1535 149056 : ++iterations;
1536 : }
1537 :
1538 149020 : CHECK(marking_worklist()->IsEmpty());
1539 149020 : CHECK(weak_objects_.current_ephemerons.IsEmpty());
1540 149020 : CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1541 149020 : }
1542 :
1543 149056 : bool MarkCompactCollector::ProcessEphemerons() {
1544 149056 : Ephemeron ephemeron;
1545 : bool ephemeron_marked = false;
1546 :
1547 : // Drain current_ephemerons and push ephemerons where key and value are still
1548 : // unreachable into next_ephemerons.
1549 298123 : while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
1550 11 : if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
1551 : ephemeron_marked = true;
1552 : }
1553 : }
1554 :
1555 : // Drain marking worklist and push discovered ephemerons into
1556 : // discovered_ephemerons.
1557 : ProcessMarkingWorklist();
1558 :
1559 : // Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
1560 : // before) and push ephemerons where key and value are still unreachable into
1561 : // next_ephemerons.
1562 149143 : while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
1563 87 : if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
1564 : ephemeron_marked = true;
1565 : }
1566 : }
1567 :
1568 : // Flush local ephemerons for main task to global pool.
1569 149056 : weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThread);
1570 149056 : weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
1571 :
1572 149056 : return ephemeron_marked;
1573 : }
1574 :
1575 0 : void MarkCompactCollector::ProcessEphemeronsLinear() {
1576 0 : TRACE_GC(heap()->tracer(),
1577 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
1578 0 : CHECK(heap()->concurrent_marking()->IsStopped());
1579 0 : std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
1580 0 : Ephemeron ephemeron;
1581 :
1582 : DCHECK(weak_objects_.current_ephemerons.IsEmpty());
1583 0 : weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1584 :
1585 0 : while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
1586 0 : VisitEphemeron(ephemeron.key, ephemeron.value);
1587 :
1588 0 : if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1589 0 : key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1590 : }
1591 : }
1592 :
1593 0 : ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1594 : bool work_to_do = true;
1595 :
1596 0 : while (work_to_do) {
1597 0 : PerformWrapperTracing();
1598 :
1599 : ResetNewlyDiscovered();
1600 0 : ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1601 :
1602 : {
1603 0 : TRACE_GC(heap()->tracer(),
1604 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1605 : // Drain marking worklist and push all discovered objects into
1606 : // newly_discovered.
1607 : ProcessMarkingWorklistInternal<
1608 : MarkCompactCollector::MarkingWorklistProcessingMode::
1609 0 : kTrackNewlyDiscoveredObjects>();
1610 : }
1611 :
1612 0 : while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
1613 0 : VisitEphemeron(ephemeron.key, ephemeron.value);
1614 :
1615 0 : if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1616 0 : key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1617 : }
1618 : }
1619 :
1620 0 : if (ephemeron_marking_.newly_discovered_overflowed) {
1621 : // If newly_discovered was overflowed just visit all ephemerons in
1622 : // next_ephemerons.
1623 0 : weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
1624 0 : if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
1625 : non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
1626 0 : marking_worklist()->Push(ephemeron.value);
1627 : }
1628 0 : });
1629 :
1630 : } else {
1631 : // This is the good case: newly_discovered stores all discovered
1632 : // objects. Now use key_to_values to see if discovered objects keep more
1633 : // objects alive due to ephemeron semantics.
1634 0 : for (HeapObject object : ephemeron_marking_.newly_discovered) {
1635 : auto range = key_to_values.equal_range(object);
1636 0 : for (auto it = range.first; it != range.second; ++it) {
1637 0 : HeapObject value = it->second;
1638 : MarkObject(object, value);
1639 : }
1640 : }
1641 : }
1642 :
1643 : // Do NOT drain marking worklist here, otherwise the current checks
1644 : // for work_to_do are not sufficient for determining if another iteration
1645 : // is necessary.
1646 :
1647 0 : work_to_do = !marking_worklist()->IsEmpty() ||
1648 0 : !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1649 0 : CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1650 : }
1651 :
1652 : ResetNewlyDiscovered();
1653 0 : ephemeron_marking_.newly_discovered.shrink_to_fit();
1654 :
1655 0 : CHECK(marking_worklist()->IsEmpty());
1656 0 : }
1657 :
1658 223566 : void MarkCompactCollector::PerformWrapperTracing() {
1659 447132 : if (heap_->local_embedder_heap_tracer()->InUse()) {
1660 540 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
1661 : {
1662 : LocalEmbedderHeapTracer::ProcessingScope scope(
1663 270 : heap_->local_embedder_heap_tracer());
1664 135 : HeapObject object;
1665 305 : while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
1666 35 : scope.TracePossibleWrapper(JSObject::cast(object));
1667 135 : }
1668 : }
1669 : heap_->local_embedder_heap_tracer()->Trace(
1670 405 : std::numeric_limits<double>::infinity());
1671 : }
1672 223566 : }
1673 :
1674 0 : void MarkCompactCollector::ProcessMarkingWorklist() {
1675 : ProcessMarkingWorklistInternal<
1676 521606 : MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>();
1677 0 : }
1678 :
1679 : template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
1680 521606 : void MarkCompactCollector::ProcessMarkingWorklistInternal() {
1681 : HeapObject object;
1682 521606 : MarkCompactMarkingVisitor visitor(this, marking_state());
1683 148717290 : while (!(object = marking_worklist()->Pop()).is_null()) {
1684 : DCHECK(!object->IsFiller());
1685 : DCHECK(object->IsHeapObject());
1686 : DCHECK(heap()->Contains(object));
1687 : DCHECK(!(marking_state()->IsWhite(object)));
1688 : marking_state()->GreyToBlack(object);
1689 : if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
1690 : kTrackNewlyDiscoveredObjects) {
1691 0 : AddNewlyDiscovered(object);
1692 : }
1693 : Map map = object->map();
1694 : MarkObject(object, map);
1695 : visitor.Visit(map, object);
1696 : }
1697 521606 : }
1698 :
1699 98 : bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {
1700 98 : if (marking_state()->IsBlackOrGrey(key)) {
1701 10 : if (marking_state()->WhiteToGrey(value)) {
1702 : marking_worklist()->Push(value);
1703 10 : return true;
1704 : }
1705 :
1706 88 : } else if (marking_state()->IsWhite(value)) {
1707 88 : weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value});
1708 : }
1709 :
1710 : return false;
1711 : }
1712 :
1713 149020 : void MarkCompactCollector::ProcessEphemeronMarking() {
1714 : DCHECK(marking_worklist()->IsEmpty());
1715 :
1716 : // Incremental marking might leave ephemerons in main task's local
1717 : // buffer, flush it into global pool.
1718 149020 : weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
1719 :
1720 149020 : ProcessEphemeronsUntilFixpoint();
1721 :
1722 149020 : CHECK(marking_worklist()->IsEmpty());
1723 298040 : CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
1724 149020 : }
1725 :
1726 74510 : void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1727 235010 : for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1728 85990 : !it.done(); it.Advance()) {
1729 131912 : if (it.frame()->type() == StackFrame::INTERPRETED) {
1730 : return;
1731 : }
1732 95431 : if (it.frame()->type() == StackFrame::OPTIMIZED) {
1733 9441 : Code code = it.frame()->LookupCode();
1734 18882 : if (!code->CanDeoptAt(it.frame()->pc())) {
1735 1465 : Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
1736 : }
1737 : return;
1738 : }
1739 : }
1740 : }
1741 :
1742 74510 : void MarkCompactCollector::RecordObjectStats() {
1743 74510 : if (V8_UNLIKELY(FLAG_gc_stats)) {
1744 0 : heap()->CreateObjectStats();
1745 : ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
1746 : heap()->dead_object_stats_.get());
1747 0 : collector.Collect();
1748 0 : if (V8_UNLIKELY(FLAG_gc_stats &
1749 : v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
1750 0 : std::stringstream live, dead;
1751 0 : heap()->live_object_stats_->Dump(live);
1752 0 : heap()->dead_object_stats_->Dump(dead);
1753 0 : TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
1754 : "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
1755 : "live", TRACE_STR_COPY(live.str().c_str()), "dead",
1756 0 : TRACE_STR_COPY(dead.str().c_str()));
1757 : }
1758 0 : if (FLAG_trace_gc_object_stats) {
1759 0 : heap()->live_object_stats_->PrintJSON("live");
1760 0 : heap()->dead_object_stats_->PrintJSON("dead");
1761 : }
1762 0 : heap()->live_object_stats_->CheckpointObjectStats();
1763 0 : heap()->dead_object_stats_->ClearObjectStats();
1764 : }
1765 74510 : }
1766 :
1767 74510 : void MarkCompactCollector::MarkLiveObjects() {
1768 1287982 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
1769 : // The recursive GC marker detects when it is nearing stack overflow,
1770 : // and switches to a different marking system. JS interrupts interfere
1771 : // with the C stack limit check.
1772 : PostponeInterruptsScope postpone(isolate());
1773 :
1774 : {
1775 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
1776 74510 : IncrementalMarking* incremental_marking = heap_->incremental_marking();
1777 74510 : if (was_marked_incrementally_) {
1778 21312 : incremental_marking->Finalize();
1779 : } else {
1780 53198 : CHECK(incremental_marking->IsStopped());
1781 74510 : }
1782 : }
1783 :
1784 : #ifdef DEBUG
1785 : DCHECK(state_ == PREPARE_GC);
1786 : state_ = MARK_LIVE_OBJECTS;
1787 : #endif
1788 :
1789 149020 : heap_->local_embedder_heap_tracer()->EnterFinalPause();
1790 :
1791 : RootMarkingVisitor root_visitor(this);
1792 :
1793 : {
1794 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
1795 : CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
1796 149020 : MarkRoots(&root_visitor, &custom_root_body_visitor);
1797 : }
1798 :
1799 : {
1800 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
1801 74510 : if (FLAG_parallel_marking) {
1802 147348 : heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1803 : }
1804 : ProcessMarkingWorklist();
1805 :
1806 : FinishConcurrentMarking(
1807 74510 : ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
1808 74510 : ProcessMarkingWorklist();
1809 : }
1810 :
1811 : {
1812 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
1813 :
1814 : DCHECK(marking_worklist()->IsEmpty());
1815 :
1816 : // Mark objects reachable through the embedder heap. This phase is
1817 : // opportunistic as it may not discover graphs that are only reachable
1818 : // through ephemerons.
1819 : {
1820 298040 : TRACE_GC(heap()->tracer(),
1821 : GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
1822 74510 : do {
1823 : // PerformWrapperTracing() also empties the work items collected by
1824 : // concurrent markers. As a result this call needs to happen at least
1825 : // once.
1826 74510 : PerformWrapperTracing();
1827 : ProcessMarkingWorklist();
1828 223530 : } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
1829 74510 : !marking_worklist()->IsEmbedderEmpty());
1830 : DCHECK(marking_worklist()->IsEmbedderEmpty());
1831 74510 : DCHECK(marking_worklist()->IsEmpty());
1832 : }
1833 :
1834 : // The objects reachable from the roots are marked, yet unreachable objects
1835 : // are unmarked. Mark objects reachable due to embedder heap tracing or
1836 : // harmony weak maps.
1837 : {
1838 298040 : TRACE_GC(heap()->tracer(),
1839 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
1840 74510 : ProcessEphemeronMarking();
1841 74510 : DCHECK(marking_worklist()->IsEmpty());
1842 : }
1843 :
1844 : // The objects reachable from the roots, weak maps, and embedder heap
1845 : // tracing are marked. Objects pointed to only by weak global handles cannot
1846 : // be immediately reclaimed. Instead, we have to mark them as pending and
1847 : // mark objects reachable from them.
1848 : //
1849 : // First we identify nonlive weak handles and mark them as pending
1850 : // destruction.
1851 : {
1852 298040 : TRACE_GC(heap()->tracer(),
1853 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
1854 : heap()->isolate()->global_handles()->IterateWeakRootsIdentifyFinalizers(
1855 74510 : &IsUnmarkedHeapObject);
1856 74510 : ProcessMarkingWorklist();
1857 : }
1858 :
1859 : // Process finalizers, effectively keeping them alive until the next
1860 : // garbage collection.
1861 : {
1862 298040 : TRACE_GC(heap()->tracer(),
1863 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
1864 : heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
1865 74510 : &root_visitor);
1866 74510 : ProcessMarkingWorklist();
1867 : }
1868 :
1869 : // Repeat ephemeron processing from the newly marked objects.
1870 : {
1871 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
1872 74510 : ProcessEphemeronMarking();
1873 : DCHECK(marking_worklist()->IsEmbedderEmpty());
1874 74510 : DCHECK(marking_worklist()->IsEmpty());
1875 : }
1876 :
1877 : {
1878 : heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
1879 74510 : &IsUnmarkedHeapObject);
1880 74510 : }
1881 : }
1882 :
1883 74510 : if (was_marked_incrementally_) {
1884 21312 : heap()->incremental_marking()->Deactivate();
1885 74510 : }
1886 74510 : }
1887 :
1888 74510 : void MarkCompactCollector::ClearNonLiveReferences() {
1889 1192160 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
1890 :
1891 : {
1892 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
1893 :
1894 : // Prune the string table removing all strings only pointed to by the
1895 : // string table. Cannot use string_table() here because the string
1896 : // table is marked.
1897 74510 : StringTable string_table = heap()->string_table();
1898 : InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
1899 74510 : string_table->IterateElements(&internalized_visitor);
1900 74510 : string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
1901 :
1902 : ExternalStringTableCleaner external_visitor(heap());
1903 74510 : heap()->external_string_table_.IterateAll(&external_visitor);
1904 149020 : heap()->external_string_table_.CleanUpAll();
1905 : }
1906 :
1907 : {
1908 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
1909 149020 : ClearOldBytecodeCandidates();
1910 : }
1911 :
1912 : {
1913 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
1914 149020 : ClearFlushedJsFunctions();
1915 : }
1916 :
1917 : {
1918 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
1919 : // Process the weak references.
1920 : MarkCompactWeakObjectRetainer mark_compact_object_retainer(
1921 74510 : non_atomic_marking_state());
1922 149020 : heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
1923 : }
1924 :
1925 : {
1926 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
1927 : // ClearFullMapTransitions must be called before weak references are
1928 : // cleared.
1929 149020 : ClearFullMapTransitions();
1930 : }
1931 : {
1932 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
1933 74510 : ClearWeakReferences();
1934 74510 : ClearWeakCollections();
1935 149020 : ClearJSWeakRefs();
1936 : }
1937 :
1938 74510 : MarkDependentCodeForDeoptimization();
1939 :
1940 : DCHECK(weak_objects_.transition_arrays.IsEmpty());
1941 : DCHECK(weak_objects_.weak_references.IsEmpty());
1942 : DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
1943 : DCHECK(weak_objects_.js_weak_refs.IsEmpty());
1944 : DCHECK(weak_objects_.weak_cells.IsEmpty());
1945 : DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
1946 74510 : DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
1947 74510 : }
1948 :
1949 74510 : void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
1950 74510 : std::pair<HeapObject, Code> weak_object_in_code;
1951 222220 : while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
1952 147710 : &weak_object_in_code)) {
1953 73200 : HeapObject object = weak_object_in_code.first;
1954 73200 : Code code = weak_object_in_code.second;
1955 74020 : if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
1956 820 : !code->embedded_objects_cleared()) {
1957 266 : if (!code->marked_for_deoptimization()) {
1958 73 : code->SetMarkedForDeoptimization("weak objects");
1959 73 : have_code_to_deoptimize_ = true;
1960 : }
1961 266 : code->ClearEmbeddedObjects(heap_);
1962 : DCHECK(code->embedded_objects_cleared());
1963 : }
1964 : }
1965 74510 : }
1966 :
1967 371124 : void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
1968 : DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
1969 371124 : Object potential_parent = dead_target->constructor_or_backpointer();
1970 371124 : if (potential_parent->IsMap()) {
1971 : Map parent = Map::cast(potential_parent);
1972 : DisallowHeapAllocation no_gc_obviously;
1973 845592 : if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
1974 : TransitionsAccessor(isolate(), parent, &no_gc_obviously)
1975 431712 : .HasSimpleTransitionTo(dead_target)) {
1976 14488 : ClearPotentialSimpleMapTransition(parent, dead_target);
1977 : }
1978 : }
1979 371124 : }
1980 :
1981 14488 : void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
1982 : Map dead_target) {
1983 : DCHECK(!map->is_prototype_map());
1984 : DCHECK(!dead_target->is_prototype_map());
1985 : DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
1986 : // Take ownership of the descriptor array.
1987 : int number_of_own_descriptors = map->NumberOfOwnDescriptors();
1988 14488 : DescriptorArray descriptors = map->instance_descriptors();
1989 28976 : if (descriptors == dead_target->instance_descriptors() &&
1990 : number_of_own_descriptors > 0) {
1991 3966 : TrimDescriptorArray(map, descriptors);
1992 : DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
1993 : }
1994 14488 : }
1995 :
1996 109298 : void MarkCompactCollector::FlushBytecodeFromSFI(
1997 : SharedFunctionInfo shared_info) {
1998 : DCHECK(shared_info->HasBytecodeArray());
1999 :
2000 : // Retain objects required for uncompiled data.
2001 109298 : String inferred_name = shared_info->inferred_name();
2002 109298 : int start_position = shared_info->StartPosition();
2003 109298 : int end_position = shared_info->EndPosition();
2004 :
2005 : shared_info->DiscardCompiledMetadata(
2006 109298 : isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
2007 : RecordSlot(object, slot, target);
2008 546490 : });
2009 :
2010 : // The size of the bytecode array should always be larger than an
2011 : // UncompiledData object.
2012 : STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
2013 : UncompiledDataWithoutPreparseData::kSize);
2014 :
2015 : // Replace bytecode array with an uncompiled data array.
2016 109298 : HeapObject compiled_data = shared_info->GetBytecodeArray();
2017 : Address compiled_data_start = compiled_data->address();
2018 109298 : int compiled_data_size = compiled_data->Size();
2019 : MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
2020 :
2021 : // Clear any recorded slots for the compiled data as being invalid.
2022 : RememberedSet<OLD_TO_NEW>::RemoveRange(
2023 : chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2024 109298 : SlotSet::PREFREE_EMPTY_BUCKETS);
2025 : RememberedSet<OLD_TO_OLD>::RemoveRange(
2026 : chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2027 109298 : SlotSet::PREFREE_EMPTY_BUCKETS);
2028 :
2029 : // Swap the map, using set_map_after_allocation to avoid verify heap checks
2030 : // which are not necessary since we are doing this during the GC atomic pause.
2031 : compiled_data->set_map_after_allocation(
2032 : ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
2033 109298 : SKIP_WRITE_BARRIER);
2034 :
2035 : // Create a filler object for any left over space in the bytecode array.
2036 109298 : if (!heap()->IsLargeObject(compiled_data)) {
2037 : heap()->CreateFillerObjectAt(
2038 : compiled_data->address() + UncompiledDataWithoutPreparseData::kSize,
2039 : compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
2040 327894 : ClearRecordedSlots::kNo);
2041 : }
2042 :
2043 : // Initialize the uncompiled data.
2044 : UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
2045 : UncompiledData::Initialize(
2046 : uncompiled_data, inferred_name, start_position, end_position,
2047 : FunctionLiteral::kIdTypeInvalid,
2048 109298 : [](HeapObject object, ObjectSlot slot, HeapObject target) {
2049 : RecordSlot(object, slot, target);
2050 327894 : });
2051 :
2052 : // Mark the uncompiled data as black, and ensure all fields have already been
2053 : // marked.
2054 : DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
2055 : non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
2056 :
2057 : // Use the raw function data setter to avoid validity checks, since we're
2058 : // performing the unusual task of decompiling.
2059 109298 : shared_info->set_function_data(uncompiled_data);
2060 : DCHECK(!shared_info->is_compiled());
2061 109298 : }
2062 :
2063 74510 : void MarkCompactCollector::ClearOldBytecodeCandidates() {
2064 : DCHECK(FLAG_flush_bytecode ||
2065 : weak_objects_.bytecode_flushing_candidates.IsEmpty());
2066 74510 : SharedFunctionInfo flushing_candidate;
2067 490253 : while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThread,
2068 415743 : &flushing_candidate)) {
2069 : // If the BytecodeArray is dead, flush it, which will replace the field with
2070 : // an uncompiled data object.
2071 341233 : if (!non_atomic_marking_state()->IsBlackOrGrey(
2072 682466 : flushing_candidate->GetBytecodeArray())) {
2073 109298 : FlushBytecodeFromSFI(flushing_candidate);
2074 : }
2075 :
2076 : // Now record the slot, which has either been updated to an uncompiled data,
2077 : // or is the BytecodeArray which is still alive.
2078 : ObjectSlot slot = HeapObject::RawField(
2079 : flushing_candidate, SharedFunctionInfo::kFunctionDataOffset);
2080 : RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
2081 : }
2082 74510 : }
2083 :
2084 74510 : void MarkCompactCollector::ClearFlushedJsFunctions() {
2085 : DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
2086 74510 : JSFunction flushed_js_function;
2087 156710 : while (weak_objects_.flushed_js_functions.Pop(kMainThread,
2088 82200 : &flushed_js_function)) {
2089 7690 : flushed_js_function->ResetIfBytecodeFlushed();
2090 : }
2091 74510 : }
2092 :
2093 74510 : void MarkCompactCollector::ClearFullMapTransitions() {
2094 74510 : TransitionArray array;
2095 883567 : while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
2096 : int num_transitions = array->number_of_entries();
2097 734547 : if (num_transitions > 0) {
2098 553993 : Map map;
2099 : // The array might contain "undefined" elements because it's not yet
2100 : // filled. Allow it.
2101 553993 : if (array->GetTargetIfExists(0, isolate(), &map)) {
2102 : DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
2103 553993 : Map parent = Map::cast(map->constructor_or_backpointer());
2104 : bool parent_is_alive =
2105 : non_atomic_marking_state()->IsBlackOrGrey(parent);
2106 : DescriptorArray descriptors = parent_is_alive
2107 : ? parent->instance_descriptors()
2108 553993 : : DescriptorArray();
2109 : bool descriptors_owner_died =
2110 553993 : CompactTransitionArray(parent, array, descriptors);
2111 553993 : if (descriptors_owner_died) {
2112 2826 : TrimDescriptorArray(parent, descriptors);
2113 : }
2114 : }
2115 : }
2116 : }
2117 74510 : }
2118 :
2119 553993 : bool MarkCompactCollector::CompactTransitionArray(Map map,
2120 : TransitionArray transitions,
2121 : DescriptorArray descriptors) {
2122 : DCHECK(!map->is_prototype_map());
2123 : int num_transitions = transitions->number_of_entries();
2124 : bool descriptors_owner_died = false;
2125 : int transition_index = 0;
2126 : // Compact all live transitions to the left.
2127 1320165 : for (int i = 0; i < num_transitions; ++i) {
2128 766172 : Map target = transitions->GetTarget(i);
2129 : DCHECK_EQ(target->constructor_or_backpointer(), map);
2130 766172 : if (non_atomic_marking_state()->IsWhite(target)) {
2131 217494 : if (!descriptors.is_null() &&
2132 217494 : target->instance_descriptors() == descriptors) {
2133 : DCHECK(!target->is_prototype_map());
2134 : descriptors_owner_died = true;
2135 : }
2136 : } else {
2137 693674 : if (i != transition_index) {
2138 25047 : Name key = transitions->GetKey(i);
2139 : transitions->SetKey(transition_index, key);
2140 : HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
2141 : RecordSlot(transitions, key_slot, key);
2142 25047 : MaybeObject raw_target = transitions->GetRawTarget(i);
2143 : transitions->SetRawTarget(transition_index, raw_target);
2144 : HeapObjectSlot target_slot =
2145 : transitions->GetTargetSlot(transition_index);
2146 25047 : RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
2147 : }
2148 693674 : transition_index++;
2149 : }
2150 : }
2151 : // If there are no transitions to be cleared, return.
2152 553993 : if (transition_index == num_transitions) {
2153 : DCHECK(!descriptors_owner_died);
2154 : return false;
2155 : }
2156 : // Note that we never eliminate a transition array, though we might right-trim
2157 : // such that number_of_transitions() == 0. If this assumption changes,
2158 : // TransitionArray::Insert() will need to deal with the case that a transition
2159 : // array disappeared during GC.
2160 17782 : int trim = transitions->Capacity() - transition_index;
2161 17782 : if (trim > 0) {
2162 : heap_->RightTrimWeakFixedArray(transitions,
2163 17782 : trim * TransitionArray::kEntrySize);
2164 : transitions->SetNumberOfTransitions(transition_index);
2165 : }
2166 17782 : return descriptors_owner_died;
2167 : }
2168 :
2169 5791 : void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
2170 : int descriptors_to_trim) {
2171 5791 : int old_nof_all_descriptors = array->number_of_all_descriptors();
2172 5791 : int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
2173 : DCHECK_LT(0, descriptors_to_trim);
2174 : DCHECK_LE(0, new_nof_all_descriptors);
2175 : Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
2176 : Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
2177 : RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
2178 : start, end,
2179 5791 : SlotSet::PREFREE_EMPTY_BUCKETS);
2180 : RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
2181 : start, end,
2182 5791 : SlotSet::PREFREE_EMPTY_BUCKETS);
2183 : heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
2184 5791 : ClearRecordedSlots::kNo);
2185 5791 : array->set_number_of_all_descriptors(new_nof_all_descriptors);
2186 5791 : }
2187 :
2188 6792 : void MarkCompactCollector::TrimDescriptorArray(Map map,
2189 : DescriptorArray descriptors) {
2190 : int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2191 6792 : if (number_of_own_descriptors == 0) {
2192 : DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
2193 6792 : return;
2194 : }
2195 : // TODO(ulan): Trim only if slack is greater than some percentage threshold.
2196 : int to_trim =
2197 6738 : descriptors->number_of_all_descriptors() - number_of_own_descriptors;
2198 6738 : if (to_trim > 0) {
2199 5791 : descriptors->set_number_of_descriptors(number_of_own_descriptors);
2200 5791 : RightTrimDescriptorArray(descriptors, to_trim);
2201 :
2202 5791 : TrimEnumCache(map, descriptors);
2203 5791 : descriptors->Sort();
2204 :
2205 : if (FLAG_unbox_double_fields) {
2206 5791 : LayoutDescriptor layout_descriptor = map->layout_descriptor();
2207 : layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2208 5791 : number_of_own_descriptors);
2209 : SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2210 : }
2211 : }
2212 : DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2213 6738 : map->set_owns_descriptors(true);
2214 : }
2215 :
2216 5791 : void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
2217 : int live_enum = map->EnumLength();
2218 5791 : if (live_enum == kInvalidEnumCacheSentinel) {
2219 5745 : live_enum = map->NumberOfEnumerableProperties();
2220 : }
2221 11502 : if (live_enum == 0) return descriptors->ClearEnumCache();
2222 5734 : EnumCache enum_cache = descriptors->enum_cache();
2223 :
2224 5734 : FixedArray keys = enum_cache->keys();
2225 5734 : int to_trim = keys->length() - live_enum;
2226 5734 : if (to_trim <= 0) return;
2227 89 : heap_->RightTrimFixedArray(keys, to_trim);
2228 :
2229 89 : FixedArray indices = enum_cache->indices();
2230 89 : to_trim = indices->length() - live_enum;
2231 89 : if (to_trim <= 0) return;
2232 80 : heap_->RightTrimFixedArray(indices, to_trim);
2233 : }
2234 :
2235 74510 : void MarkCompactCollector::ClearWeakCollections() {
2236 372550 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2237 74510 : EphemeronHashTable table;
2238 :
2239 157483 : while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
2240 39824 : for (int i = 0; i < table->Capacity(); i++) {
2241 39824 : HeapObject key = HeapObject::cast(table->KeyAt(i));
2242 : #ifdef VERIFY_HEAP
2243 : Object value = table->ValueAt(i);
2244 :
2245 : if (value->IsHeapObject()) {
2246 : CHECK_IMPLIES(
2247 : non_atomic_marking_state()->IsBlackOrGrey(key),
2248 : non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
2249 : }
2250 : #endif
2251 39824 : if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
2252 88 : table->RemoveEntry(i);
2253 : }
2254 : }
2255 74510 : }
2256 74510 : }
2257 :
2258 74510 : void MarkCompactCollector::ClearWeakReferences() {
2259 372550 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2260 : std::pair<HeapObject, HeapObjectSlot> slot;
2261 : HeapObjectReference cleared_weak_ref =
2262 : HeapObjectReference::ClearedValue(isolate());
2263 30653218 : while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
2264 30504198 : HeapObject value;
2265 : // The slot could have been overwritten, so we have to treat it
2266 : // as MaybeObjectSlot.
2267 : MaybeObjectSlot location(slot.second);
2268 30504198 : if ((*location)->GetHeapObjectIfWeak(&value)) {
2269 : DCHECK(!value->IsCell());
2270 30479977 : if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
2271 : // The value of the weak reference is alive.
2272 : RecordSlot(slot.first, HeapObjectSlot(location), value);
2273 : } else {
2274 2820246 : if (value->IsMap()) {
2275 : // The map is non-live.
2276 371124 : ClearPotentialSimpleMapTransition(Map::cast(value));
2277 : }
2278 : location.store(cleared_weak_ref);
2279 : }
2280 : }
2281 74510 : }
2282 74510 : }
2283 :
2284 74510 : void MarkCompactCollector::ClearJSWeakRefs() {
2285 74510 : if (!FLAG_harmony_weak_refs) {
2286 74114 : return;
2287 : }
2288 396 : JSWeakRef weak_ref;
2289 918 : while (weak_objects_.js_weak_refs.Pop(kMainThread, &weak_ref)) {
2290 252 : HeapObject target = HeapObject::cast(weak_ref->target());
2291 126 : if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
2292 281 : weak_ref->set_target(ReadOnlyRoots(isolate()).undefined_value());
2293 : } else {
2294 : // The value of the JSWeakRef is alive.
2295 : ObjectSlot slot =
2296 : HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
2297 : RecordSlot(weak_ref, slot, target);
2298 : }
2299 : }
2300 396 : WeakCell weak_cell;
2301 1070 : while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
2302 556 : HeapObject target = HeapObject::cast(weak_cell->target());
2303 278 : if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
2304 : DCHECK(!target->IsUndefined());
2305 : // The value of the WeakCell is dead.
2306 : JSFinalizationGroup finalization_group =
2307 261 : JSFinalizationGroup::cast(weak_cell->finalization_group());
2308 261 : if (!finalization_group->scheduled_for_cleanup()) {
2309 : heap()->AddDirtyJSFinalizationGroup(
2310 : finalization_group,
2311 207 : [](HeapObject object, ObjectSlot slot, Object target) {
2312 207 : if (target->IsHeapObject()) {
2313 : RecordSlot(object, slot, HeapObject::cast(target));
2314 : }
2315 621 : });
2316 : }
2317 : // We're modifying the pointers in WeakCell and JSFinalizationGroup during
2318 : // GC; thus we need to record the slots it writes. The normal write
2319 : // barrier is not enough, since it's disabled before GC.
2320 : weak_cell->Nullify(isolate(),
2321 847 : [](HeapObject object, ObjectSlot slot, Object target) {
2322 847 : if (target->IsHeapObject()) {
2323 : RecordSlot(object, slot, HeapObject::cast(target));
2324 : }
2325 1369 : });
2326 : DCHECK(finalization_group->NeedsCleanup());
2327 : DCHECK(finalization_group->scheduled_for_cleanup());
2328 : } else {
2329 : // The value of the WeakCell is alive.
2330 : ObjectSlot slot =
2331 : HeapObject::RawField(weak_cell, WeakCell::kTargetOffset);
2332 : RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
2333 : }
2334 : }
2335 : }
2336 :
2337 61034 : void MarkCompactCollector::AbortWeakObjects() {
2338 61034 : weak_objects_.transition_arrays.Clear();
2339 61034 : weak_objects_.ephemeron_hash_tables.Clear();
2340 61034 : weak_objects_.current_ephemerons.Clear();
2341 61034 : weak_objects_.next_ephemerons.Clear();
2342 61034 : weak_objects_.discovered_ephemerons.Clear();
2343 61034 : weak_objects_.weak_references.Clear();
2344 61034 : weak_objects_.weak_objects_in_code.Clear();
2345 61034 : weak_objects_.js_weak_refs.Clear();
2346 61034 : weak_objects_.weak_cells.Clear();
2347 61034 : weak_objects_.bytecode_flushing_candidates.Clear();
2348 61034 : weak_objects_.flushed_js_functions.Clear();
2349 61034 : }
2350 :
2351 0 : bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
2352 0 : return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
2353 : }
2354 :
2355 : MarkCompactCollector::RecordRelocSlotInfo
2356 7904644 : MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
2357 : HeapObject target) {
2358 : RecordRelocSlotInfo result;
2359 7582189 : result.should_record = false;
2360 : Page* target_page = Page::FromHeapObject(target);
2361 : Page* source_page = Page::FromHeapObject(host);
2362 15164378 : if (target_page->IsEvacuationCandidate() &&
2363 357201 : (rinfo->host().is_null() ||
2364 : !source_page->ShouldSkipEvacuationSlotRecording())) {
2365 : RelocInfo::Mode rmode = rinfo->rmode();
2366 : Address addr = rinfo->pc();
2367 : SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
2368 322455 : if (rinfo->IsInConstantPool()) {
2369 : addr = rinfo->constant_pool_entry_address();
2370 : if (RelocInfo::IsCodeTargetMode(rmode)) {
2371 : slot_type = CODE_ENTRY_SLOT;
2372 : } else {
2373 : DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2374 : slot_type = OBJECT_SLOT;
2375 : }
2376 : }
2377 644824 : uintptr_t offset = addr - source_page->address();
2378 : DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
2379 322412 : result.should_record = true;
2380 322412 : result.memory_chunk = source_page;
2381 322412 : result.slot_type = slot_type;
2382 322412 : result.offset = static_cast<uint32_t>(offset);
2383 : }
2384 7582146 : return result;
2385 : }
2386 :
2387 2778225 : void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
2388 : HeapObject target) {
2389 2778225 : RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
2390 2778225 : if (info.should_record) {
2391 : RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
2392 134862 : info.offset);
2393 : }
2394 2778225 : }
2395 :
2396 : namespace {
2397 :
2398 : // Missing specialization MakeSlotValue<FullObjectSlot, WEAK>() will turn
2399 : // attempt to store a weak reference to strong-only slot to a compilation error.
2400 : template <typename TSlot, HeapObjectReferenceType reference_type>
2401 : typename TSlot::TObject MakeSlotValue(HeapObject heap_object);
2402 :
2403 : template <>
2404 : Object MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
2405 : HeapObject heap_object) {
2406 : return heap_object;
2407 : }
2408 :
2409 : template <>
2410 : MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
2411 : HeapObject heap_object) {
2412 : return HeapObjectReference::Strong(heap_object);
2413 : }
2414 :
2415 : template <>
2416 : MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
2417 : HeapObject heap_object) {
2418 : return HeapObjectReference::Weak(heap_object);
2419 : }
2420 :
2421 : #ifdef V8_COMPRESS_POINTERS
2422 : template <>
2423 : Object MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
2424 : HeapObject heap_object) {
2425 : return heap_object;
2426 : }
2427 :
2428 : template <>
2429 : MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
2430 : HeapObject heap_object) {
2431 : return HeapObjectReference::Strong(heap_object);
2432 : }
2433 :
2434 : // The following specialization
2435 : // MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
2436 : // is not used.
2437 : #endif
2438 :
2439 : template <AccessMode access_mode, HeapObjectReferenceType reference_type,
2440 : typename TSlot>
2441 445862357 : static inline SlotCallbackResult UpdateSlot(TSlot slot,
2442 : typename TSlot::TObject old,
2443 : HeapObject heap_obj) {
2444 : static_assert(
2445 : std::is_same<TSlot, FullObjectSlot>::value ||
2446 : std::is_same<TSlot, ObjectSlot>::value ||
2447 : std::is_same<TSlot, FullMaybeObjectSlot>::value ||
2448 : std::is_same<TSlot, MaybeObjectSlot>::value,
2449 : "Only [Full]ObjectSlot and [Full]MaybeObjectSlot are expected here");
2450 : MapWord map_word = heap_obj->map_word();
2451 445862357 : if (map_word.IsForwardingAddress()) {
2452 : DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
2453 : MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2454 : Page::FromHeapObject(heap_obj)->IsFlagSet(
2455 : Page::COMPACTION_WAS_ABORTED));
2456 : typename TSlot::TObject target =
2457 : MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
2458 : if (access_mode == AccessMode::NON_ATOMIC) {
2459 : slot.store(target);
2460 : } else {
2461 : slot.Release_CompareAndSwap(old, target);
2462 : }
2463 : DCHECK(!Heap::InFromPage(target));
2464 : DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
2465 : } else {
2466 : DCHECK(heap_obj->map()->IsMap());
2467 : }
2468 : // OLD_TO_OLD slots are always removed after updating.
2469 445862357 : return REMOVE_SLOT;
2470 : }
2471 :
2472 : template <AccessMode access_mode, typename TSlot>
2473 106099908 : static inline SlotCallbackResult UpdateSlot(TSlot slot) {
2474 106099908 : typename TSlot::TObject obj = slot.Relaxed_Load();
2475 106099908 : HeapObject heap_obj;
2476 106099908 : if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
2477 7281644 : UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(slot, obj, heap_obj);
2478 99023500 : } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
2479 : return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
2480 66351361 : heap_obj);
2481 : }
2482 : return REMOVE_SLOT;
2483 : }
2484 :
2485 : template <AccessMode access_mode, typename TSlot>
2486 410036042 : static inline SlotCallbackResult UpdateStrongSlot(TSlot slot) {
2487 : DCHECK(!HasWeakHeapObjectTag((*slot).ptr()));
2488 410036042 : typename TSlot::TObject obj = slot.Relaxed_Load();
2489 410036042 : HeapObject heap_obj;
2490 410036042 : if (obj.GetHeapObject(&heap_obj)) {
2491 : return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
2492 365290307 : heap_obj);
2493 : }
2494 : return REMOVE_SLOT;
2495 : }
2496 :
2497 : } // namespace
2498 :
2499 : // Visitor for updating root pointers and to-space pointers.
2500 : // It does not expect to encounter pointers to dead objects.
2501 151047 : class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
2502 : public:
2503 152390 : PointersUpdatingVisitor() {}
2504 :
2505 32707 : void VisitPointer(HeapObject host, ObjectSlot p) override {
2506 : UpdateStrongSlotInternal(p);
2507 32707 : }
2508 :
2509 12 : void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
2510 : UpdateSlotInternal(p);
2511 12 : }
2512 :
2513 14848738 : void VisitPointers(HeapObject host, ObjectSlot start,
2514 : ObjectSlot end) override {
2515 175136194 : for (ObjectSlot p = start; p < end; ++p) {
2516 : UpdateStrongSlotInternal(p);
2517 : }
2518 14800562 : }
2519 :
2520 0 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
2521 : MaybeObjectSlot end) final {
2522 62722088 : for (MaybeObjectSlot p = start; p < end; ++p) {
2523 : UpdateSlotInternal(p);
2524 : }
2525 0 : }
2526 :
2527 231000860 : void VisitRootPointer(Root root, const char* description,
2528 : FullObjectSlot p) override {
2529 : UpdateRootSlotInternal(p);
2530 231000860 : }
2531 :
2532 1780724 : void VisitRootPointers(Root root, const char* description,
2533 : FullObjectSlot start, FullObjectSlot end) override {
2534 36326838 : for (FullObjectSlot p = start; p < end; ++p) {
2535 : UpdateRootSlotInternal(p);
2536 : }
2537 1780724 : }
2538 :
2539 0 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
2540 : // This visitor nevers visits code objects.
2541 0 : UNREACHABLE();
2542 : }
2543 :
2544 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
2545 : // This visitor nevers visits code objects.
2546 0 : UNREACHABLE();
2547 : }
2548 :
2549 : private:
2550 : static inline SlotCallbackResult UpdateRootSlotInternal(FullObjectSlot slot) {
2551 263766250 : return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
2552 : }
2553 :
2554 : static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
2555 : MaybeObjectSlot slot) {
2556 : return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
2557 : }
2558 :
2559 : static inline SlotCallbackResult UpdateStrongSlotInternal(ObjectSlot slot) {
2560 145519601 : return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
2561 : }
2562 :
2563 : static inline SlotCallbackResult UpdateSlotInternal(MaybeObjectSlot slot) {
2564 59607149 : return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
2565 : }
2566 : };
2567 :
2568 109461 : static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
2569 : FullObjectSlot p) {
2570 : MapWord map_word = HeapObject::cast(*p)->map_word();
2571 :
2572 109461 : if (map_word.IsForwardingAddress()) {
2573 507 : String new_string = String::cast(map_word.ToForwardingAddress());
2574 :
2575 507 : if (new_string->IsExternalString()) {
2576 : MemoryChunk::MoveExternalBackingStoreBytes(
2577 : ExternalBackingStoreType::kExternalString,
2578 : Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
2579 1014 : ExternalString::cast(new_string)->ExternalPayloadSize());
2580 : }
2581 507 : return new_string;
2582 : }
2583 :
2584 : return String::cast(*p);
2585 : }
2586 :
2587 74510 : void MarkCompactCollector::EvacuatePrologue() {
2588 : // New space.
2589 223530 : NewSpace* new_space = heap()->new_space();
2590 : // Append the list of new space pages to be processed.
2591 158950 : for (Page* p :
2592 84440 : PageRange(new_space->first_allocatable_address(), new_space->top())) {
2593 84440 : new_space_evacuation_pages_.push_back(p);
2594 : }
2595 74510 : new_space->Flip();
2596 74510 : new_space->ResetLinearAllocationArea();
2597 :
2598 74510 : heap()->new_lo_space()->Flip();
2599 74510 : heap()->new_lo_space()->ResetPendingObject();
2600 :
2601 : // Old space.
2602 : DCHECK(old_space_evacuation_pages_.empty());
2603 74510 : old_space_evacuation_pages_ = std::move(evacuation_candidates_);
2604 : evacuation_candidates_.clear();
2605 : DCHECK(evacuation_candidates_.empty());
2606 74510 : }
2607 :
2608 74510 : void MarkCompactCollector::EvacuateEpilogue() {
2609 : aborted_evacuation_candidates_.clear();
2610 : // New space.
2611 372550 : heap()->new_space()->set_age_mark(heap()->new_space()->top());
2612 : // Deallocate unmarked large objects.
2613 74510 : heap()->lo_space()->FreeUnmarkedObjects();
2614 74510 : heap()->code_lo_space()->FreeUnmarkedObjects();
2615 74510 : heap()->new_lo_space()->FreeUnmarkedObjects();
2616 : // Old space. Deallocate evacuated candidate pages.
2617 74510 : ReleaseEvacuationCandidates();
2618 : // Give pages that are queued to be freed back to the OS.
2619 74510 : heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2620 : #ifdef DEBUG
2621 : // Old-to-old slot sets must be empty after evacuation.
2622 : for (Page* p : *heap()->old_space()) {
2623 : DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2624 : DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2625 : DCHECK_NULL(p->invalidated_slots());
2626 : }
2627 : #endif
2628 74510 : }
2629 :
2630 : class Evacuator : public Malloced {
2631 : public:
2632 : enum EvacuationMode {
2633 : kObjectsNewToOld,
2634 : kPageNewToOld,
2635 : kObjectsOldToOld,
2636 : kPageNewToNew,
2637 : };
2638 :
2639 : static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
2640 : // Note: The order of checks is important in this function.
2641 173759 : if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
2642 : return kPageNewToOld;
2643 172685 : if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
2644 : return kPageNewToNew;
2645 169153 : if (chunk->InYoungGeneration()) return kObjectsNewToOld;
2646 : return kObjectsOldToOld;
2647 : }
2648 :
2649 : // NewSpacePages with more live bytes than this threshold qualify for fast
2650 : // evacuation.
2651 56933 : static intptr_t NewSpacePageEvacuationThreshold() {
2652 56933 : if (FLAG_page_promotion)
2653 113826 : return FLAG_page_promotion_threshold *
2654 113826 : MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
2655 20 : return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
2656 : }
2657 :
2658 77254 : Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
2659 : : heap_(heap),
2660 : local_allocator_(heap_),
2661 : local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
2662 : new_space_visitor_(heap_, &local_allocator_, record_visitor,
2663 : &local_pretenuring_feedback_),
2664 : new_to_new_page_visitor_(heap_, record_visitor,
2665 : &local_pretenuring_feedback_),
2666 : new_to_old_page_visitor_(heap_, record_visitor,
2667 : &local_pretenuring_feedback_),
2668 :
2669 : old_space_visitor_(heap_, &local_allocator_, record_visitor),
2670 : duration_(0.0),
2671 231762 : bytes_compacted_(0) {}
2672 :
2673 231762 : virtual ~Evacuator() = default;
2674 :
2675 : void EvacuatePage(MemoryChunk* chunk);
2676 :
2677 722 : void AddObserver(MigrationObserver* observer) {
2678 : new_space_visitor_.AddObserver(observer);
2679 : old_space_visitor_.AddObserver(observer);
2680 722 : }
2681 :
2682 : // Merge back locally cached info sequentially. Note that this method needs
2683 : // to be called from the main thread.
2684 : inline void Finalize();
2685 :
2686 : virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
2687 :
2688 : protected:
2689 : static const int kInitialLocalPretenuringFeedbackCapacity = 256;
2690 :
2691 : // |saved_live_bytes| returns the live bytes of the page that was processed.
2692 : virtual void RawEvacuatePage(MemoryChunk* chunk,
2693 : intptr_t* saved_live_bytes) = 0;
2694 :
2695 : inline Heap* heap() { return heap_; }
2696 :
2697 : void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
2698 79017 : duration_ += duration;
2699 79017 : bytes_compacted_ += bytes_compacted;
2700 : }
2701 :
2702 : Heap* heap_;
2703 :
2704 : // Locally cached collector data.
2705 : LocalAllocator local_allocator_;
2706 : Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
2707 :
2708 : // Visitors for the corresponding spaces.
2709 : EvacuateNewSpaceVisitor new_space_visitor_;
2710 : EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
2711 : new_to_new_page_visitor_;
2712 : EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
2713 : new_to_old_page_visitor_;
2714 : EvacuateOldSpaceVisitor old_space_visitor_;
2715 :
2716 : // Book keeping info.
2717 : double duration_;
2718 : intptr_t bytes_compacted_;
2719 : };
2720 :
2721 157847 : void Evacuator::EvacuatePage(MemoryChunk* chunk) {
2722 157847 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
2723 : DCHECK(chunk->SweepingDone());
2724 78926 : intptr_t saved_live_bytes = 0;
2725 78926 : double evacuation_time = 0.0;
2726 : {
2727 : AlwaysAllocateScope always_allocate(heap()->isolate());
2728 : TimedScope timed_scope(&evacuation_time);
2729 78919 : RawEvacuatePage(chunk, &saved_live_bytes);
2730 : }
2731 79017 : ReportCompactionProgress(evacuation_time, saved_live_bytes);
2732 79017 : if (FLAG_trace_evacuation) {
2733 : PrintIsolate(heap()->isolate(),
2734 : "evacuation[%p]: page=%p new_space=%d "
2735 : "page_evacuation=%d executable=%d contains_age_mark=%d "
2736 : "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
2737 : static_cast<void*>(this), static_cast<void*>(chunk),
2738 : chunk->InNewSpace(),
2739 0 : chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
2740 : chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
2741 : chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
2742 0 : chunk->Contains(heap()->new_space()->age_mark()),
2743 : saved_live_bytes, evacuation_time,
2744 0 : chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2745 79017 : }
2746 79018 : }
2747 :
2748 463524 : void Evacuator::Finalize() {
2749 77254 : local_allocator_.Finalize();
2750 154508 : heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
2751 309016 : heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
2752 231762 : new_to_old_page_visitor_.moved_bytes());
2753 : heap()->IncrementSemiSpaceCopiedObjectSize(
2754 77254 : new_space_visitor_.semispace_copied_size() +
2755 231762 : new_to_new_page_visitor_.moved_bytes());
2756 : heap()->IncrementYoungSurvivorsCounter(
2757 77254 : new_space_visitor_.promoted_size() +
2758 77254 : new_space_visitor_.semispace_copied_size() +
2759 77254 : new_to_old_page_visitor_.moved_bytes() +
2760 77254 : new_to_new_page_visitor_.moved_bytes());
2761 154508 : heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
2762 77254 : }
2763 :
2764 154508 : class FullEvacuator : public Evacuator {
2765 : public:
2766 : FullEvacuator(MarkCompactCollector* collector,
2767 : RecordMigratedSlotVisitor* record_visitor)
2768 77254 : : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
2769 :
2770 76407 : GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
2771 76407 : return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
2772 : }
2773 :
2774 : protected:
2775 : void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
2776 :
2777 : MarkCompactCollector* collector_;
2778 : };
2779 :
2780 160082 : void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
2781 78852 : const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
2782 157715 : TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2783 : "FullEvacuator::RawEvacuatePage", "evacuation_mode",
2784 : evacuation_mode);
2785 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
2786 78863 : collector_->non_atomic_marking_state();
2787 78863 : *live_bytes = marking_state->live_bytes(chunk);
2788 78863 : HeapObject failed_object;
2789 78863 : switch (evacuation_mode) {
2790 : case kObjectsNewToOld:
2791 : LiveObjectVisitor::VisitBlackObjectsNoFail(
2792 : chunk, marking_state, &new_space_visitor_,
2793 66165 : LiveObjectVisitor::kClearMarkbits);
2794 : // ArrayBufferTracker will be updated during pointers updating.
2795 66186 : break;
2796 : case kPageNewToOld:
2797 : LiveObjectVisitor::VisitBlackObjectsNoFail(
2798 : chunk, marking_state, &new_to_old_page_visitor_,
2799 587 : LiveObjectVisitor::kKeepMarking);
2800 : new_to_old_page_visitor_.account_moved_bytes(
2801 : marking_state->live_bytes(chunk));
2802 : // ArrayBufferTracker will be updated during sweeping.
2803 : break;
2804 : case kPageNewToNew:
2805 : LiveObjectVisitor::VisitBlackObjectsNoFail(
2806 : chunk, marking_state, &new_to_new_page_visitor_,
2807 1778 : LiveObjectVisitor::kKeepMarking);
2808 : new_to_new_page_visitor_.account_moved_bytes(
2809 : marking_state->live_bytes(chunk));
2810 : // ArrayBufferTracker will be updated during sweeping.
2811 : break;
2812 : case kObjectsOldToOld: {
2813 : const bool success = LiveObjectVisitor::VisitBlackObjects(
2814 : chunk, marking_state, &old_space_visitor_,
2815 10370 : LiveObjectVisitor::kClearMarkbits, &failed_object);
2816 10467 : if (!success) {
2817 : // Aborted compaction page. Actual processing happens on the main
2818 : // thread for simplicity reasons.
2819 25 : collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
2820 : } else {
2821 : // ArrayBufferTracker will be updated during pointers updating.
2822 : }
2823 : break;
2824 : }
2825 78983 : }
2826 79017 : }
2827 :
2828 : class EvacuationItem : public ItemParallelJob::Item {
2829 : public:
2830 79020 : explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
2831 158040 : ~EvacuationItem() override = default;
2832 : MemoryChunk* chunk() const { return chunk_; }
2833 :
2834 : private:
2835 : MemoryChunk* chunk_;
2836 : };
2837 :
2838 154504 : class PageEvacuationTask : public ItemParallelJob::Task {
2839 : public:
2840 : PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
2841 : : ItemParallelJob::Task(isolate),
2842 : evacuator_(evacuator),
2843 154508 : tracer_(isolate->heap()->tracer()) {}
2844 :
2845 76426 : void RunInParallel() override {
2846 305753 : TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
2847 79001 : EvacuationItem* item = nullptr;
2848 155441 : while ((item = GetItem<EvacuationItem>()) != nullptr) {
2849 79001 : evacuator_->EvacuatePage(item->chunk());
2850 79019 : item->MarkFinished();
2851 76479 : }
2852 76487 : }
2853 :
2854 : private:
2855 : Evacuator* evacuator_;
2856 : GCTracer* tracer_;
2857 : };
2858 :
2859 : template <class Evacuator, class Collector>
2860 62380 : void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
2861 : Collector* collector, ItemParallelJob* job,
2862 : RecordMigratedSlotVisitor* record_visitor,
2863 139634 : MigrationObserver* migration_observer, const intptr_t live_bytes) {
2864 : // Used for trace summary.
2865 : double compaction_speed = 0;
2866 62380 : if (FLAG_trace_evacuation) {
2867 0 : compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
2868 : }
2869 :
2870 62380 : const bool profiling = isolate()->LogObjectRelocation();
2871 : ProfilingMigrationObserver profiling_observer(heap());
2872 :
2873 : const int wanted_num_tasks =
2874 62380 : NumberOfParallelCompactionTasks(job->NumberOfItems());
2875 62380 : Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
2876 139634 : for (int i = 0; i < wanted_num_tasks; i++) {
2877 77254 : evacuators[i] = new Evacuator(collector, record_visitor);
2878 77254 : if (profiling) evacuators[i]->AddObserver(&profiling_observer);
2879 77254 : if (migration_observer != nullptr)
2880 0 : evacuators[i]->AddObserver(migration_observer);
2881 154508 : job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
2882 : }
2883 62380 : job->Run();
2884 139634 : for (int i = 0; i < wanted_num_tasks; i++) {
2885 77254 : evacuators[i]->Finalize();
2886 77254 : delete evacuators[i];
2887 : }
2888 62380 : delete[] evacuators;
2889 :
2890 62380 : if (FLAG_trace_evacuation) {
2891 0 : PrintIsolate(isolate(),
2892 : "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
2893 : "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
2894 : " compaction_speed=%.f\n",
2895 : isolate()->time_millis_since_init(),
2896 : FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
2897 : wanted_num_tasks, job->NumberOfTasks(),
2898 0 : V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
2899 0 : live_bytes, compaction_speed);
2900 : }
2901 62380 : }
2902 :
2903 70925 : bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
2904 68553 : const bool reduce_memory = heap()->ShouldReduceMemory();
2905 68553 : const Address age_mark = heap()->new_space()->age_mark();
2906 113866 : return !reduce_memory && !p->NeverEvacuate() &&
2907 60139 : (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
2908 73297 : !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
2909 : }
2910 :
2911 74510 : void MarkCompactCollector::EvacuatePagesInParallel() {
2912 : ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
2913 224118 : &page_parallel_job_semaphore_);
2914 : intptr_t live_bytes = 0;
2915 :
2916 159487 : for (Page* page : old_space_evacuation_pages_) {
2917 10467 : live_bytes += non_atomic_marking_state()->live_bytes(page);
2918 10467 : evacuation_job.AddItem(new EvacuationItem(page));
2919 : }
2920 :
2921 234048 : for (Page* page : new_space_evacuation_pages_) {
2922 : intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
2923 84440 : if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
2924 68553 : live_bytes += live_bytes_on_page;
2925 68553 : if (ShouldMovePage(page, live_bytes_on_page)) {
2926 2367 : if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
2927 588 : EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
2928 : DCHECK_EQ(heap()->old_space(), page->owner());
2929 : // The move added page->allocated_bytes to the old space, but we are
2930 : // going to sweep the page and add page->live_byte_count.
2931 588 : heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
2932 : page);
2933 : } else {
2934 1779 : EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
2935 : }
2936 : }
2937 68553 : evacuation_job.AddItem(new EvacuationItem(page));
2938 : }
2939 :
2940 : // Promote young generation large objects.
2941 : IncrementalMarking::NonAtomicMarkingState* marking_state =
2942 : heap()->incremental_marking()->non_atomic_marking_state();
2943 :
2944 149020 : for (auto it = heap()->new_lo_space()->begin();
2945 : it != heap()->new_lo_space()->end();) {
2946 : LargePage* current = *it;
2947 : it++;
2948 : HeapObject object = current->GetObject();
2949 : DCHECK(!marking_state->IsGrey(object));
2950 0 : if (marking_state->IsBlack(object)) {
2951 0 : heap_->lo_space()->PromoteNewLargeObject(current);
2952 : current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
2953 0 : evacuation_job.AddItem(new EvacuationItem(current));
2954 : }
2955 : }
2956 :
2957 86640 : if (evacuation_job.NumberOfItems() == 0) return;
2958 :
2959 : RecordMigratedSlotVisitor record_visitor(this);
2960 : CreateAndExecuteEvacuationTasks<FullEvacuator>(
2961 62380 : this, &evacuation_job, &record_visitor, nullptr, live_bytes);
2962 124760 : PostProcessEvacuationCandidates();
2963 : }
2964 :
2965 74510 : class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2966 : public:
2967 149020 : Object RetainAs(Object object) override {
2968 149020 : if (object->IsHeapObject()) {
2969 : HeapObject heap_object = HeapObject::cast(object);
2970 : MapWord map_word = heap_object->map_word();
2971 149020 : if (map_word.IsForwardingAddress()) {
2972 3234 : return map_word.ToForwardingAddress();
2973 : }
2974 : }
2975 145786 : return object;
2976 : }
2977 : };
2978 :
2979 0 : void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
2980 0 : EvacuateRecordOnlyVisitor visitor(heap());
2981 : LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
2982 : &visitor,
2983 0 : LiveObjectVisitor::kKeepMarking);
2984 0 : }
2985 :
2986 : template <class Visitor, typename MarkingState>
2987 10343 : bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
2988 : MarkingState* marking_state,
2989 : Visitor* visitor,
2990 : IterationMode iteration_mode,
2991 : HeapObject* failed_object) {
2992 20692 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2993 : "LiveObjectVisitor::VisitBlackObjects");
2994 67980376 : for (auto object_and_size :
2995 : LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
2996 : HeapObject const object = object_and_size.first;
2997 33979747 : if (!visitor->Visit(object, object_and_size.second)) {
2998 25 : if (iteration_mode == kClearMarkbits) {
2999 25 : marking_state->bitmap(chunk)->ClearRange(
3000 : chunk->AddressToMarkbitIndex(chunk->area_start()),
3001 : chunk->AddressToMarkbitIndex(object->address()));
3002 25 : *failed_object = object;
3003 : }
3004 : return false;
3005 : }
3006 : }
3007 10441 : if (iteration_mode == kClearMarkbits) {
3008 : marking_state->ClearLiveness(chunk);
3009 : }
3010 10466 : return true;
3011 : }
3012 :
3013 : template <class Visitor, typename MarkingState>
3014 68518 : void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
3015 : MarkingState* marking_state,
3016 : Visitor* visitor,
3017 : IterationMode iteration_mode) {
3018 137048 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3019 : "LiveObjectVisitor::VisitBlackObjectsNoFail");
3020 68530 : if (chunk->IsLargePage()) {
3021 0 : HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
3022 0 : if (marking_state->IsBlack(object)) {
3023 0 : const bool success = visitor->Visit(object, object->Size());
3024 : USE(success);
3025 : DCHECK(success);
3026 : }
3027 : } else {
3028 87569646 : for (auto object_and_size :
3029 : LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
3030 41117510 : HeapObject const object = object_and_size.first;
3031 : DCHECK(marking_state->IsBlack(object));
3032 41117510 : const bool success = visitor->Visit(object, object_and_size.second);
3033 : USE(success);
3034 : DCHECK(success);
3035 : }
3036 : }
3037 68578 : if (iteration_mode == kClearMarkbits) {
3038 : marking_state->ClearLiveness(chunk);
3039 68578 : }
3040 68578 : }
3041 :
3042 : template <class Visitor, typename MarkingState>
3043 0 : void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
3044 : MarkingState* marking_state,
3045 : Visitor* visitor,
3046 : IterationMode iteration_mode) {
3047 0 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3048 : "LiveObjectVisitor::VisitGreyObjectsNoFail");
3049 0 : if (chunk->IsLargePage()) {
3050 0 : HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
3051 0 : if (marking_state->IsGrey(object)) {
3052 0 : const bool success = visitor->Visit(object, object->Size());
3053 : USE(success);
3054 : DCHECK(success);
3055 : }
3056 : } else {
3057 0 : for (auto object_and_size :
3058 : LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
3059 0 : HeapObject const object = object_and_size.first;
3060 : DCHECK(marking_state->IsGrey(object));
3061 0 : const bool success = visitor->Visit(object, object_and_size.second);
3062 : USE(success);
3063 : DCHECK(success);
3064 : }
3065 : }
3066 0 : if (iteration_mode == kClearMarkbits) {
3067 0 : marking_state->ClearLiveness(chunk);
3068 0 : }
3069 0 : }
3070 :
3071 : template <typename MarkingState>
3072 25 : void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
3073 : MarkingState* marking_state) {
3074 : int new_live_size = 0;
3075 180 : for (auto object_and_size :
3076 : LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
3077 65 : new_live_size += object_and_size.second;
3078 : }
3079 25 : marking_state->SetLiveBytes(chunk, new_live_size);
3080 25 : }
3081 :
3082 76902 : void MarkCompactCollector::Evacuate() {
3083 968630 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3084 74510 : base::MutexGuard guard(heap()->relocation_mutex());
3085 :
3086 : {
3087 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
3088 149020 : EvacuatePrologue();
3089 : }
3090 :
3091 : {
3092 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
3093 : EvacuationScope evacuation_scope(this);
3094 149020 : EvacuatePagesInParallel();
3095 : }
3096 :
3097 74510 : UpdatePointersAfterEvacuation();
3098 :
3099 : {
3100 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
3101 74510 : if (!heap()->new_space()->Rebalance()) {
3102 0 : heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
3103 74510 : }
3104 : }
3105 :
3106 : // Give pages that are queued to be freed back to the OS. Note that filtering
3107 : // slots only handles old space (for unboxed doubles), and thus map space can
3108 : // still contain stale pointers. We only free the chunks after pointer updates
3109 : // to still have access to page headers.
3110 74510 : heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3111 :
3112 : {
3113 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3114 :
3115 233460 : for (Page* p : new_space_evacuation_pages_) {
3116 84440 : if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3117 : p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3118 1779 : sweeper()->AddPageForIterability(p);
3119 82661 : } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3120 : p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3121 : DCHECK_EQ(OLD_SPACE, p->owner()->identity());
3122 588 : sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
3123 : }
3124 : }
3125 : new_space_evacuation_pages_.clear();
3126 :
3127 159487 : for (Page* p : old_space_evacuation_pages_) {
3128 : // Important: skip list should be cleared only after roots were updated
3129 : // because root iteration traverses the stack and might have to find
3130 : // code objects from non-updated pc pointing into evacuation candidate.
3131 10467 : SkipList* list = p->skip_list();
3132 10467 : if (list != nullptr) list->Clear();
3133 10467 : if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3134 50 : sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
3135 : p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3136 : }
3137 74510 : }
3138 : }
3139 :
3140 : {
3141 298040 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
3142 149020 : EvacuateEpilogue();
3143 74510 : }
3144 :
3145 : #ifdef VERIFY_HEAP
3146 : if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
3147 : FullEvacuationVerifier verifier(heap());
3148 : verifier.Run();
3149 : }
3150 : #endif
3151 74510 : }
3152 :
3153 474687 : class UpdatingItem : public ItemParallelJob::Item {
3154 : public:
3155 474687 : ~UpdatingItem() override = default;
3156 : virtual void Process() = 0;
3157 : };
3158 :
3159 633226 : class PointersUpdatingTask : public ItemParallelJob::Task {
3160 : public:
3161 : explicit PointersUpdatingTask(Isolate* isolate,
3162 : GCTracer::BackgroundScope::ScopeId scope)
3163 : : ItemParallelJob::Task(isolate),
3164 : tracer_(isolate->heap()->tracer()),
3165 633948 : scope_(scope) {}
3166 :
3167 284242 : void RunInParallel() override {
3168 1137729 : TRACE_BACKGROUND_GC(tracer_, scope_);
3169 : UpdatingItem* item = nullptr;
3170 758837 : while ((item = GetItem<UpdatingItem>()) != nullptr) {
3171 474346 : item->Process();
3172 473840 : item->MarkFinished();
3173 284256 : }
3174 284908 : }
3175 :
3176 : private:
3177 : GCTracer* tracer_;
3178 : GCTracer::BackgroundScope::ScopeId scope_;
3179 : };
3180 :
3181 : template <typename MarkingState>
3182 : class ToSpaceUpdatingItem : public UpdatingItem {
3183 : public:
3184 : explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
3185 : MarkingState* marking_state)
3186 : : chunk_(chunk),
3187 : start_(start),
3188 : end_(end),
3189 76537 : marking_state_(marking_state) {}
3190 153074 : ~ToSpaceUpdatingItem() override = default;
3191 :
3192 76533 : void Process() override {
3193 153066 : if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3194 : // New->new promoted pages contain garbage so they require iteration using
3195 : // markbits.
3196 1776 : ProcessVisitLive();
3197 : } else {
3198 74757 : ProcessVisitAll();
3199 : }
3200 76537 : }
3201 :
3202 : private:
3203 76103 : void ProcessVisitAll() {
3204 152207 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3205 : "ToSpaceUpdatingItem::ProcessVisitAll");
3206 : PointersUpdatingVisitor visitor;
3207 24198488 : for (Address cur = start_; cur < end_;) {
3208 24047626 : HeapObject object = HeapObject::FromAddress(cur);
3209 : Map map = object->map();
3210 24047626 : int size = object->SizeFromMap(map);
3211 : object->IterateBodyFast(map, size, &visitor);
3212 24046280 : cur += size;
3213 74758 : }
3214 74758 : }
3215 :
3216 1776 : void ProcessVisitLive() {
3217 3552 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3218 : "ToSpaceUpdatingItem::ProcessVisitLive");
3219 : // For young generation evacuations we want to visit grey objects, for
3220 : // full MC, we need to visit black objects.
3221 : PointersUpdatingVisitor visitor;
3222 18269763 : for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
3223 : chunk_, marking_state_->bitmap(chunk_))) {
3224 6088143 : object_and_size.first->IterateBodyFast(&visitor);
3225 1779 : }
3226 1779 : }
3227 :
3228 : MemoryChunk* chunk_;
3229 : Address start_;
3230 : Address end_;
3231 : MarkingState* marking_state_;
3232 : };
3233 :
3234 : template <typename MarkingState>
3235 : class RememberedSetUpdatingItem : public UpdatingItem {
3236 : public:
3237 : explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
3238 : MemoryChunk* chunk,
3239 : RememberedSetUpdatingMode updating_mode)
3240 : : heap_(heap),
3241 : marking_state_(marking_state),
3242 : chunk_(chunk),
3243 314917 : updating_mode_(updating_mode) {}
3244 629834 : ~RememberedSetUpdatingItem() override = default;
3245 :
3246 313256 : void Process() override {
3247 626525 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3248 : "RememberedSetUpdatingItem::Process");
3249 313269 : base::MutexGuard guard(chunk_->mutex());
3250 314248 : CodePageMemoryModificationScope memory_modification_scope(chunk_);
3251 314075 : UpdateUntypedPointers();
3252 628993 : UpdateTypedPointers();
3253 314461 : }
3254 :
3255 : private:
3256 : template <typename TSlot>
3257 39063287 : inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
3258 : static_assert(
3259 : std::is_same<TSlot, FullMaybeObjectSlot>::value ||
3260 : std::is_same<TSlot, MaybeObjectSlot>::value,
3261 : "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
3262 : using THeapObjectSlot = typename TSlot::THeapObjectSlot;
3263 39063287 : HeapObject heap_object;
3264 39063287 : if (!(*slot).GetHeapObject(&heap_object)) {
3265 : return REMOVE_SLOT;
3266 : }
3267 39121713 : if (Heap::InFromPage(heap_object)) {
3268 : MapWord map_word = heap_object->map_word();
3269 36126988 : if (map_word.IsForwardingAddress()) {
3270 : HeapObjectReference::Update(THeapObjectSlot(slot),
3271 : map_word.ToForwardingAddress());
3272 : }
3273 36118545 : bool success = (*slot).GetHeapObject(&heap_object);
3274 : USE(success);
3275 : DCHECK(success);
3276 : // If the object was in from space before and is after executing the
3277 : // callback in to space, the object is still live.
3278 : // Unfortunately, we do not know about the slot. It could be in a
3279 : // just freed free space object.
3280 36064858 : if (Heap::InToPage(heap_object)) {
3281 : return KEEP_SLOT;
3282 : }
3283 2994725 : } else if (Heap::InToPage(heap_object)) {
3284 : // Slots can point to "to" space if the page has been moved, or if the
3285 : // slot has been recorded multiple times in the remembered set, or
3286 : // if the slot was already updated during old->old updating.
3287 : // In case the page has been moved, check markbits to determine liveness
3288 : // of the slot. In the other case, the slot can just be kept.
3289 1546417 : if (Page::FromHeapObject(heap_object)
3290 : ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3291 : // IsBlackOrGrey is required because objects are marked as grey for
3292 : // the young generation collector while they are black for the full
3293 : // MC.);
3294 1543400 : if (marking_state_->IsBlackOrGrey(heap_object)) {
3295 : return KEEP_SLOT;
3296 : } else {
3297 534 : return REMOVE_SLOT;
3298 : }
3299 : }
3300 : return KEEP_SLOT;
3301 : } else {
3302 : DCHECK(!Heap::InYoungGeneration(heap_object));
3303 : }
3304 : return REMOVE_SLOT;
3305 : }
3306 :
3307 313941 : void UpdateUntypedPointers() {
3308 942938 : if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
3309 304684 : RememberedSet<OLD_TO_NEW>::Iterate(
3310 : chunk_,
3311 : [this](MaybeObjectSlot slot) {
3312 39019761 : return CheckAndUpdateOldToNewSlot(slot);
3313 39019761 : },
3314 : SlotSet::PREFREE_EMPTY_BUCKETS);
3315 : }
3316 629089 : if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3317 314604 : (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
3318 17931 : InvalidatedSlotsFilter filter(chunk_);
3319 35842 : RememberedSet<OLD_TO_OLD>::Iterate(
3320 : chunk_,
3321 46919497 : [&filter](MaybeObjectSlot slot) {
3322 46919497 : if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
3323 46610500 : return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
3324 : },
3325 17921 : SlotSet::PREFREE_EMPTY_BUCKETS);
3326 : }
3327 628883 : if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3328 314393 : chunk_->invalidated_slots() != nullptr) {
3329 : #ifdef DEBUG
3330 : for (auto object_size : *chunk_->invalidated_slots()) {
3331 : HeapObject object = object_size.first;
3332 : int size = object_size.second;
3333 : DCHECK_LE(object->SizeFromMap(object->map()), size);
3334 : }
3335 : #endif
3336 : // The invalidated slots are not needed after old-to-old slots were
3337 : // processsed.
3338 107 : chunk_->ReleaseInvalidatedSlots();
3339 : }
3340 314490 : }
3341 :
3342 314247 : void UpdateTypedPointers() {
3343 628498 : if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
3344 : nullptr) {
3345 4180 : CHECK_NE(chunk_->owner(), heap_->map_space());
3346 : const auto check_and_update_old_to_new_slot_fn =
3347 : [this](FullMaybeObjectSlot slot) {
3348 36683 : return CheckAndUpdateOldToNewSlot(slot);
3349 36683 : };
3350 3240 : RememberedSet<OLD_TO_NEW>::IterateTyped(
3351 : chunk_, [=](SlotType slot_type, Address slot) {
3352 : return UpdateTypedSlotHelper::UpdateTypedSlot(
3353 36683 : heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
3354 38303 : });
3355 : }
3356 628618 : if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3357 314251 : (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
3358 : nullptr)) {
3359 2820 : CHECK_NE(chunk_->owner(), heap_->map_space());
3360 1880 : RememberedSet<OLD_TO_OLD>::IterateTyped(
3361 : chunk_, [=](SlotType slot_type, Address slot) {
3362 : // Using UpdateStrongSlot is OK here, because there are no weak
3363 : // typed slots.
3364 : return UpdateTypedSlotHelper::UpdateTypedSlot(
3365 : heap_, slot_type, slot,
3366 322814 : UpdateStrongSlot<AccessMode::NON_ATOMIC, FullMaybeObjectSlot>);
3367 323754 : });
3368 : }
3369 314367 : }
3370 :
3371 : Heap* heap_;
3372 : MarkingState* marking_state_;
3373 : MemoryChunk* chunk_;
3374 : RememberedSetUpdatingMode updating_mode_;
3375 : };
3376 :
3377 76537 : UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
3378 : MemoryChunk* chunk, Address start, Address end) {
3379 : return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
3380 153074 : chunk, start, end, non_atomic_marking_state());
3381 : }
3382 :
3383 314917 : UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
3384 : MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
3385 : return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
3386 629834 : heap(), non_atomic_marking_state(), chunk, updating_mode);
3387 : }
3388 :
3389 : // Update array buffers on a page that has been evacuated by copying objects.
3390 : // Target page exclusivity in old space is guaranteed by the fact that
3391 : // evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
3392 : // free list items of a given page. For new space the tracker will update
3393 : // using a lock.
3394 : class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
3395 : public:
3396 : enum EvacuationState { kRegular, kAborted };
3397 :
3398 : explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
3399 83233 : : page_(page), state_(state) {}
3400 166466 : ~ArrayBufferTrackerUpdatingItem() override = default;
3401 :
3402 83210 : void Process() override {
3403 166421 : TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3404 : "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
3405 : state_);
3406 83211 : switch (state_) {
3407 : case EvacuationState::kRegular:
3408 : ArrayBufferTracker::ProcessBuffers(
3409 83206 : page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3410 83232 : break;
3411 : case EvacuationState::kAborted:
3412 : ArrayBufferTracker::ProcessBuffers(
3413 0 : page_, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3414 0 : break;
3415 83237 : }
3416 83229 : }
3417 :
3418 : private:
3419 : Page* const page_;
3420 : const EvacuationState state_;
3421 : };
3422 :
3423 74510 : int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
3424 74510 : ItemParallelJob* job) {
3425 : // Seed to space pages.
3426 74510 : const Address space_start = heap()->new_space()->first_allocatable_address();
3427 : const Address space_end = heap()->new_space()->top();
3428 : int pages = 0;
3429 151047 : for (Page* page : PageRange(space_start, space_end)) {
3430 : Address start =
3431 78564 : page->Contains(space_start) ? space_start : page->area_start();
3432 76537 : Address end = page->Contains(space_end) ? space_end : page->area_end();
3433 76537 : job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
3434 76537 : pages++;
3435 : }
3436 74510 : if (pages == 0) return 0;
3437 74510 : return NumberOfParallelToSpacePointerUpdateTasks(pages);
3438 : }
3439 :
3440 : template <typename IterateableSpace>
3441 372550 : int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
3442 : ItemParallelJob* job, IterateableSpace* space,
3443 : RememberedSetUpdatingMode mode) {
3444 : int pages = 0;
3445 1524650 : for (MemoryChunk* chunk : *space) {
3446 : const bool contains_old_to_old_slots =
3447 : chunk->slot_set<OLD_TO_OLD>() != nullptr ||
3448 1134152 : chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
3449 : const bool contains_old_to_new_slots =
3450 : chunk->slot_set<OLD_TO_NEW>() != nullptr ||
3451 846525 : chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
3452 : const bool contains_invalidated_slots =
3453 576050 : chunk->invalidated_slots() != nullptr;
3454 576050 : if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
3455 : !contains_invalidated_slots)
3456 : continue;
3457 314917 : if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
3458 : contains_invalidated_slots) {
3459 314917 : job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
3460 314917 : pages++;
3461 : }
3462 : }
3463 372550 : return pages;
3464 : }
3465 :
3466 74510 : int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
3467 : ItemParallelJob* job) {
3468 : int pages = 0;
3469 315533 : for (Page* p : new_space_evacuation_pages_) {
3470 84440 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
3471 82073 : if (p->local_tracker() == nullptr) continue;
3472 :
3473 80991 : pages++;
3474 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
3475 80991 : p, ArrayBufferTrackerUpdatingItem::kRegular));
3476 : }
3477 : }
3478 74510 : return pages;
3479 : }
3480 :
3481 74510 : int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
3482 : ItemParallelJob* job) {
3483 : int pages = 0;
3484 169929 : for (Page* p : old_space_evacuation_pages_) {
3485 20934 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
3486 : p->IsEvacuationCandidate()) {
3487 10442 : if (p->local_tracker() == nullptr) continue;
3488 :
3489 2242 : pages++;
3490 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
3491 2242 : p, ArrayBufferTrackerUpdatingItem::kRegular));
3492 : }
3493 : }
3494 149045 : for (auto object_and_page : aborted_evacuation_candidates_) {
3495 25 : Page* p = object_and_page.second;
3496 25 : if (p->local_tracker() == nullptr) continue;
3497 :
3498 0 : pages++;
3499 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
3500 0 : p, ArrayBufferTrackerUpdatingItem::kAborted));
3501 : }
3502 74510 : return pages;
3503 : }
3504 :
3505 74510 : void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3506 1192160 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3507 :
3508 : PointersUpdatingVisitor updating_visitor;
3509 :
3510 : {
3511 298040 : TRACE_GC(heap()->tracer(),
3512 : GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
3513 149020 : heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3514 : }
3515 :
3516 : {
3517 298040 : TRACE_GC(heap()->tracer(),
3518 : GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
3519 : ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3520 223530 : &page_parallel_job_semaphore_);
3521 :
3522 : int remembered_set_pages = 0;
3523 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3524 74510 : &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
3525 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3526 74510 : &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
3527 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3528 74510 : &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
3529 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3530 74510 : &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
3531 : const int remembered_set_tasks =
3532 : remembered_set_pages == 0
3533 : ? 0
3534 : : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
3535 74510 : old_to_new_slots_);
3536 74510 : const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
3537 : const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
3538 274118 : for (int i = 0; i < num_tasks; i++) {
3539 : updating_job.AddTask(new PointersUpdatingTask(
3540 : isolate(),
3541 399216 : GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3542 : }
3543 149020 : updating_job.Run();
3544 : }
3545 :
3546 : {
3547 : // - Update pointers in map space in a separate phase to avoid data races
3548 : // with Map->LayoutDescriptor edge.
3549 : // - Update array buffer trackers in the second phase to have access to
3550 : // byte length which is potentially a HeapNumber.
3551 298040 : TRACE_GC(heap()->tracer(),
3552 : GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
3553 : ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3554 149020 : &page_parallel_job_semaphore_);
3555 :
3556 : int array_buffer_pages = 0;
3557 74510 : array_buffer_pages += CollectNewSpaceArrayBufferTrackerItems(&updating_job);
3558 74510 : array_buffer_pages += CollectOldSpaceArrayBufferTrackerItems(&updating_job);
3559 :
3560 : int remembered_set_pages = 0;
3561 : remembered_set_pages += CollectRememberedSetUpdatingItems(
3562 74510 : &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
3563 : const int remembered_set_tasks =
3564 : remembered_set_pages == 0
3565 : ? 0
3566 : : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
3567 74510 : old_to_new_slots_);
3568 : const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
3569 74510 : if (num_tasks > 0) {
3570 117366 : for (int i = 0; i < num_tasks; i++) {
3571 : updating_job.AddTask(new PointersUpdatingTask(
3572 : isolate(),
3573 234732 : GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3574 : }
3575 74510 : updating_job.Run();
3576 74510 : heap()->array_buffer_collector()->FreeAllocations();
3577 74510 : }
3578 : }
3579 :
3580 : {
3581 298040 : TRACE_GC(heap()->tracer(),
3582 : GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3583 : // Update pointers from external string table.
3584 : heap_->UpdateReferencesInExternalStringTable(
3585 74510 : &UpdateReferenceInExternalStringTableEntry);
3586 :
3587 74510 : EvacuationWeakObjectRetainer evacuation_object_retainer;
3588 149020 : heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3589 74510 : }
3590 74510 : }
3591 :
3592 25 : void MarkCompactCollector::ReportAbortedEvacuationCandidate(
3593 : HeapObject failed_object, MemoryChunk* chunk) {
3594 25 : base::MutexGuard guard(&mutex_);
3595 :
3596 : aborted_evacuation_candidates_.push_back(
3597 50 : std::make_pair(failed_object, static_cast<Page*>(chunk)));
3598 25 : }
3599 :
3600 62380 : void MarkCompactCollector::PostProcessEvacuationCandidates() {
3601 124785 : for (auto object_and_page : aborted_evacuation_candidates_) {
3602 : HeapObject failed_object = object_and_page.first;
3603 : Page* page = object_and_page.second;
3604 : page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3605 : // Aborted compaction page. We have to record slots here, since we
3606 : // might not have recorded them in first place.
3607 :
3608 : // Remove outdated slots.
3609 : RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
3610 : failed_object->address(),
3611 50 : SlotSet::PREFREE_EMPTY_BUCKETS);
3612 : RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
3613 25 : failed_object->address());
3614 : // Recompute live bytes.
3615 25 : LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
3616 : // Re-record slots.
3617 25 : EvacuateRecordOnlyVisitor record_visitor(heap());
3618 : LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
3619 : &record_visitor,
3620 25 : LiveObjectVisitor::kKeepMarking);
3621 : // Array buffers will be processed during pointer updating.
3622 : }
3623 : const int aborted_pages =
3624 124760 : static_cast<int>(aborted_evacuation_candidates_.size());
3625 : int aborted_pages_verified = 0;
3626 135227 : for (Page* p : old_space_evacuation_pages_) {
3627 10467 : if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3628 : // After clearing the evacuation candidate flag the page is again in a
3629 : // regular state.
3630 : p->ClearEvacuationCandidate();
3631 : aborted_pages_verified++;
3632 : } else {
3633 : DCHECK(p->IsEvacuationCandidate());
3634 : DCHECK(p->SweepingDone());
3635 10442 : p->owner()->memory_chunk_list().Remove(p);
3636 : }
3637 : }
3638 : DCHECK_EQ(aborted_pages_verified, aborted_pages);
3639 62380 : if (FLAG_trace_evacuation && (aborted_pages > 0)) {
3640 : PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
3641 0 : isolate()->time_millis_since_init(), aborted_pages);
3642 : }
3643 62380 : }
3644 :
3645 74510 : void MarkCompactCollector::ReleaseEvacuationCandidates() {
3646 159487 : for (Page* p : old_space_evacuation_pages_) {
3647 10467 : if (!p->IsEvacuationCandidate()) continue;
3648 : PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3649 : non_atomic_marking_state()->SetLiveBytes(p, 0);
3650 10442 : CHECK(p->SweepingDone());
3651 10442 : space->ReleasePage(p);
3652 : }
3653 : old_space_evacuation_pages_.clear();
3654 74510 : compacting_ = false;
3655 74510 : }
3656 :
3657 701383 : void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3658 : space->ClearStats();
3659 :
3660 : int will_be_swept = 0;
3661 : bool unused_page_present = false;
3662 :
3663 : // Loop needs to support deletion if live bytes == 0 for a page.
3664 717025 : for (auto it = space->begin(); it != space->end();) {
3665 483028 : Page* p = *(it++);
3666 : DCHECK(p->SweepingDone());
3667 :
3668 493495 : if (p->IsEvacuationCandidate()) {
3669 : // Will be processed in Evacuate.
3670 : DCHECK(!evacuation_candidates_.empty());
3671 : continue;
3672 : }
3673 :
3674 483028 : if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3675 : // We need to sweep the page to get it into an iterable state again. Note
3676 : // that this adds unusable memory into the free list that is later on
3677 : // (in the free list) dropped again. Since we only use the flag for
3678 : // testing this is fine.
3679 : p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
3680 : sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
3681 : Heap::ShouldZapGarbage()
3682 : ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
3683 170 : : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
3684 : space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
3685 : continue;
3686 : }
3687 :
3688 : // One unused page is kept, all further are released before sweeping them.
3689 482858 : if (non_atomic_marking_state()->live_bytes(p) == 0) {
3690 12468 : if (unused_page_present) {
3691 : if (FLAG_gc_verbose) {
3692 : PrintIsolate(isolate(), "sweeping: released page: %p",
3693 : static_cast<void*>(p));
3694 : }
3695 5175 : ArrayBufferTracker::FreeAll(p);
3696 482858 : space->memory_chunk_list().Remove(p);
3697 5175 : space->ReleasePage(p);
3698 5175 : continue;
3699 : }
3700 : unused_page_present = true;
3701 : }
3702 :
3703 477683 : sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
3704 : will_be_swept++;
3705 : }
3706 :
3707 : if (FLAG_gc_verbose) {
3708 : PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
3709 : space->name(), will_be_swept);
3710 : }
3711 223530 : }
3712 :
3713 149020 : void MarkCompactCollector::StartSweepSpaces() {
3714 819610 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
3715 : #ifdef DEBUG
3716 : state_ = SWEEP_SPACES;
3717 : #endif
3718 :
3719 : {
3720 : {
3721 : GCTracer::Scope sweep_scope(heap()->tracer(),
3722 74510 : GCTracer::Scope::MC_SWEEP_OLD);
3723 74510 : StartSweepSpace(heap()->old_space());
3724 : }
3725 : {
3726 : GCTracer::Scope sweep_scope(heap()->tracer(),
3727 74510 : GCTracer::Scope::MC_SWEEP_CODE);
3728 74510 : StartSweepSpace(heap()->code_space());
3729 : }
3730 : {
3731 : GCTracer::Scope sweep_scope(heap()->tracer(),
3732 74510 : GCTracer::Scope::MC_SWEEP_MAP);
3733 74510 : StartSweepSpace(heap()->map_space());
3734 : }
3735 74510 : sweeper()->StartSweeping();
3736 74510 : }
3737 74510 : }
3738 :
3739 0 : void MarkCompactCollector::MarkingWorklist::PrintWorklist(
3740 : const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
3741 : std::map<InstanceType, int> count;
3742 0 : int total_count = 0;
3743 0 : worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
3744 0 : ++total_count;
3745 0 : count[obj->map()->instance_type()]++;
3746 0 : });
3747 : std::vector<std::pair<int, InstanceType>> rank;
3748 0 : rank.reserve(count.size());
3749 0 : for (const auto& i : count) {
3750 0 : rank.emplace_back(i.second, i.first);
3751 : }
3752 : std::map<InstanceType, std::string> instance_type_name;
3753 : #define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
3754 0 : INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
3755 : #undef INSTANCE_TYPE_NAME
3756 : std::sort(rank.begin(), rank.end(),
3757 0 : std::greater<std::pair<int, InstanceType>>());
3758 0 : PrintF("Worklist %s: %d\n", worklist_name, total_count);
3759 0 : for (auto i : rank) {
3760 0 : PrintF(" [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
3761 : }
3762 0 : }
3763 :
3764 : #ifdef ENABLE_MINOR_MC
3765 :
3766 : namespace {
3767 :
3768 : #ifdef VERIFY_HEAP
3769 :
3770 : class YoungGenerationMarkingVerifier : public MarkingVerifier {
3771 : public:
3772 : explicit YoungGenerationMarkingVerifier(Heap* heap)
3773 : : MarkingVerifier(heap),
3774 : marking_state_(
3775 : heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
3776 :
3777 : Bitmap* bitmap(const MemoryChunk* chunk) override {
3778 : return marking_state_->bitmap(chunk);
3779 : }
3780 :
3781 : bool IsMarked(HeapObject object) override {
3782 : return marking_state_->IsGrey(object);
3783 : }
3784 :
3785 : bool IsBlackOrGrey(HeapObject object) override {
3786 : return marking_state_->IsBlackOrGrey(object);
3787 : }
3788 :
3789 : void Run() override {
3790 : VerifyRoots(VISIT_ALL_IN_SCAVENGE);
3791 : VerifyMarking(heap_->new_space());
3792 : }
3793 :
3794 : protected:
3795 : void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
3796 : VerifyPointersImpl(start, end);
3797 : }
3798 :
3799 : void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
3800 : VerifyPointersImpl(start, end);
3801 : }
3802 :
3803 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
3804 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
3805 : VerifyHeapObjectImpl(target);
3806 : }
3807 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3808 : VerifyHeapObjectImpl(rinfo->target_object());
3809 : }
3810 : void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
3811 : VerifyPointersImpl(start, end);
3812 : }
3813 :
3814 : private:
3815 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
3816 : CHECK_IMPLIES(Heap::InYoungGeneration(heap_object), IsMarked(heap_object));
3817 : }
3818 :
3819 : template <typename TSlot>
3820 : V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
3821 : for (TSlot slot = start; slot < end; ++slot) {
3822 : typename TSlot::TObject object = *slot;
3823 : HeapObject heap_object;
3824 : // Minor MC treats weak references as strong.
3825 : if (object.GetHeapObject(&heap_object)) {
3826 : VerifyHeapObjectImpl(heap_object);
3827 : }
3828 : }
3829 : }
3830 :
3831 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
3832 : };
3833 :
3834 : class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
3835 : public:
3836 : explicit YoungGenerationEvacuationVerifier(Heap* heap)
3837 : : EvacuationVerifier(heap) {}
3838 :
3839 : void Run() override {
3840 : VerifyRoots(VISIT_ALL_IN_SCAVENGE);
3841 : VerifyEvacuation(heap_->new_space());
3842 : VerifyEvacuation(heap_->old_space());
3843 : VerifyEvacuation(heap_->code_space());
3844 : VerifyEvacuation(heap_->map_space());
3845 : }
3846 :
3847 : protected:
3848 : V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
3849 : CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
3850 : Heap::InToPage(heap_object));
3851 : }
3852 :
3853 : template <typename TSlot>
3854 : void VerifyPointersImpl(TSlot start, TSlot end) {
3855 : for (TSlot current = start; current < end; ++current) {
3856 : typename TSlot::TObject object = *current;
3857 : HeapObject heap_object;
3858 : if (object.GetHeapObject(&heap_object)) {
3859 : VerifyHeapObjectImpl(heap_object);
3860 : }
3861 : }
3862 : }
3863 :
3864 : void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
3865 : VerifyPointersImpl(start, end);
3866 : }
3867 : void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
3868 : VerifyPointersImpl(start, end);
3869 : }
3870 : void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
3871 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
3872 : VerifyHeapObjectImpl(target);
3873 : }
3874 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3875 : VerifyHeapObjectImpl(rinfo->target_object());
3876 : }
3877 : void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
3878 : VerifyPointersImpl(start, end);
3879 : }
3880 : };
3881 :
3882 : #endif // VERIFY_HEAP
3883 :
3884 0 : bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
3885 : DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
3886 0 : return Heap::InYoungGeneration(*p) && !heap->minor_mark_compact_collector()
3887 : ->non_atomic_marking_state()
3888 0 : ->IsGrey(HeapObject::cast(*p));
3889 : }
3890 :
3891 : } // namespace
3892 :
3893 122068 : class YoungGenerationMarkingVisitor final
3894 : : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
3895 : public:
3896 : YoungGenerationMarkingVisitor(
3897 : MinorMarkCompactCollector::MarkingState* marking_state,
3898 : MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
3899 122098 : : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
3900 :
3901 0 : V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
3902 : ObjectSlot end) final {
3903 : VisitPointersImpl(host, start, end);
3904 0 : }
3905 :
3906 0 : V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
3907 : MaybeObjectSlot end) final {
3908 : VisitPointersImpl(host, start, end);
3909 0 : }
3910 :
3911 0 : V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
3912 : VisitPointerImpl(host, slot);
3913 0 : }
3914 :
3915 0 : V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot slot) final {
3916 : VisitPointerImpl(host, slot);
3917 0 : }
3918 :
3919 0 : V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
3920 : // Code objects are not expected in new space.
3921 0 : UNREACHABLE();
3922 : }
3923 :
3924 0 : V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
3925 : // Code objects are not expected in new space.
3926 0 : UNREACHABLE();
3927 : }
3928 :
3929 : private:
3930 : template <typename TSlot>
3931 : V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
3932 0 : for (TSlot slot = start; slot < end; ++slot) {
3933 : VisitPointer(host, slot);
3934 : }
3935 : }
3936 :
3937 : template <typename TSlot>
3938 : V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
3939 0 : typename TSlot::TObject target = *slot;
3940 0 : if (Heap::InYoungGeneration(target)) {
3941 : // Treat weak references as strong.
3942 : // TODO(marja): Proper weakness handling for minor-mcs.
3943 0 : HeapObject target_object = target.GetHeapObject();
3944 0 : MarkObjectViaMarkingWorklist(target_object);
3945 : }
3946 : }
3947 :
3948 0 : inline void MarkObjectViaMarkingWorklist(HeapObject object) {
3949 0 : if (marking_state_->WhiteToGrey(object)) {
3950 : // Marking deque overflow is unsupported for the young generation.
3951 0 : CHECK(worklist_.Push(object));
3952 : }
3953 0 : }
3954 :
3955 : MinorMarkCompactCollector::MarkingWorklist::View worklist_;
3956 : MinorMarkCompactCollector::MarkingState* marking_state_;
3957 : };
3958 :
3959 61049 : void MinorMarkCompactCollector::SetUp() {}
3960 :
3961 61034 : void MinorMarkCompactCollector::TearDown() {}
3962 :
3963 61049 : MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
3964 : : MarkCompactCollectorBase(heap),
3965 : worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
3966 : main_marking_visitor_(new YoungGenerationMarkingVisitor(
3967 61049 : marking_state(), worklist_, kMainMarker)),
3968 183147 : page_parallel_job_semaphore_(0) {
3969 : static_assert(
3970 : kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
3971 : "more marker tasks than marking deque can handle");
3972 61049 : }
3973 :
3974 183102 : MinorMarkCompactCollector::~MinorMarkCompactCollector() {
3975 61034 : delete worklist_;
3976 61034 : delete main_marking_visitor_;
3977 122068 : }
3978 :
3979 0 : int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
3980 : DCHECK_GT(pages, 0);
3981 0 : if (!FLAG_minor_mc_parallel_marking) return 1;
3982 : // Pages are not private to markers but we can still use them to estimate the
3983 : // amount of marking that is required.
3984 : const int kPagesPerTask = 2;
3985 0 : const int wanted_tasks = Max(1, pages / kPagesPerTask);
3986 : return Min(NumberOfAvailableCores(),
3987 : Min(wanted_tasks,
3988 0 : MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
3989 : }
3990 :
3991 74510 : void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
3992 149020 : for (Page* p : sweep_to_iterate_pages_) {
3993 0 : if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
3994 : p->ClearFlag(Page::SWEEP_TO_ITERATE);
3995 0 : non_atomic_marking_state()->ClearLiveness(p);
3996 : }
3997 : }
3998 : sweep_to_iterate_pages_.clear();
3999 74510 : }
4000 :
4001 0 : class YoungGenerationMigrationObserver final : public MigrationObserver {
4002 : public:
4003 : YoungGenerationMigrationObserver(Heap* heap,
4004 : MarkCompactCollector* mark_compact_collector)
4005 : : MigrationObserver(heap),
4006 0 : mark_compact_collector_(mark_compact_collector) {}
4007 :
4008 0 : inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
4009 : int size) final {
4010 : // Migrate color to old generation marking in case the object survived young
4011 : // generation garbage collection.
4012 0 : if (heap_->incremental_marking()->IsMarking()) {
4013 : DCHECK(
4014 : heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
4015 : heap_->incremental_marking()->TransferColor(src, dst);
4016 : }
4017 0 : }
4018 :
4019 : protected:
4020 : base::Mutex mutex_;
4021 : MarkCompactCollector* mark_compact_collector_;
4022 : };
4023 :
4024 0 : class YoungGenerationRecordMigratedSlotVisitor final
4025 : : public RecordMigratedSlotVisitor {
4026 : public:
4027 : explicit YoungGenerationRecordMigratedSlotVisitor(
4028 : MarkCompactCollector* collector)
4029 0 : : RecordMigratedSlotVisitor(collector) {}
4030 :
4031 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
4032 0 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
4033 0 : UNREACHABLE();
4034 : }
4035 :
4036 : private:
4037 : // Only record slots for host objects that are considered as live by the full
4038 : // collector.
4039 0 : inline bool IsLive(HeapObject object) {
4040 0 : return collector_->non_atomic_marking_state()->IsBlack(object);
4041 : }
4042 :
4043 0 : inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
4044 : Address slot) final {
4045 0 : if (value->IsStrongOrWeak()) {
4046 0 : MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
4047 0 : if (p->InYoungGeneration()) {
4048 : DCHECK_IMPLIES(
4049 : p->IsToPage(),
4050 : p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
4051 : RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
4052 0 : MemoryChunk::FromHeapObject(host), slot);
4053 0 : } else if (p->IsEvacuationCandidate() && IsLive(host)) {
4054 : RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
4055 0 : MemoryChunk::FromHeapObject(host), slot);
4056 : }
4057 : }
4058 0 : }
4059 : };
4060 :
4061 0 : void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
4062 0 : TRACE_GC(heap()->tracer(),
4063 : GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
4064 :
4065 : PointersUpdatingVisitor updating_visitor;
4066 : ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
4067 0 : &page_parallel_job_semaphore_);
4068 :
4069 0 : CollectNewSpaceArrayBufferTrackerItems(&updating_job);
4070 : // Create batches of global handles.
4071 0 : const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
4072 : int remembered_set_pages = 0;
4073 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4074 : &updating_job, heap()->old_space(),
4075 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4076 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4077 : &updating_job, heap()->code_space(),
4078 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4079 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4080 : &updating_job, heap()->map_space(),
4081 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4082 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4083 : &updating_job, heap()->lo_space(),
4084 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4085 : remembered_set_pages += CollectRememberedSetUpdatingItems(
4086 : &updating_job, heap()->code_lo_space(),
4087 0 : RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4088 : const int remembered_set_tasks =
4089 : remembered_set_pages == 0 ? 0
4090 : : NumberOfParallelPointerUpdateTasks(
4091 0 : remembered_set_pages, old_to_new_slots_);
4092 : const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
4093 0 : for (int i = 0; i < num_tasks; i++) {
4094 : updating_job.AddTask(new PointersUpdatingTask(
4095 : isolate(), GCTracer::BackgroundScope::
4096 0 : MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
4097 : }
4098 :
4099 : {
4100 0 : TRACE_GC(heap()->tracer(),
4101 : GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
4102 0 : heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
4103 : }
4104 : {
4105 0 : TRACE_GC(heap()->tracer(),
4106 : GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
4107 0 : updating_job.Run();
4108 0 : heap()->array_buffer_collector()->FreeAllocations();
4109 : }
4110 :
4111 : {
4112 0 : TRACE_GC(heap()->tracer(),
4113 : GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
4114 :
4115 0 : EvacuationWeakObjectRetainer evacuation_object_retainer;
4116 0 : heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4117 :
4118 : // Update pointers from external string table.
4119 : heap()->UpdateYoungReferencesInExternalStringTable(
4120 0 : &UpdateReferenceInExternalStringTableEntry);
4121 0 : }
4122 0 : }
4123 :
4124 0 : class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
4125 : public:
4126 : explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
4127 0 : : collector_(collector) {}
4128 :
4129 0 : void VisitRootPointer(Root root, const char* description,
4130 : FullObjectSlot p) final {
4131 : MarkObjectByPointer(p);
4132 0 : }
4133 :
4134 0 : void VisitRootPointers(Root root, const char* description,
4135 : FullObjectSlot start, FullObjectSlot end) final {
4136 0 : for (FullObjectSlot p = start; p < end; ++p) {
4137 : MarkObjectByPointer(p);
4138 : }
4139 0 : }
4140 :
4141 : private:
4142 : V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
4143 0 : if (!(*p)->IsHeapObject()) return;
4144 0 : collector_->MarkRootObject(HeapObject::cast(*p));
4145 : }
4146 : MinorMarkCompactCollector* const collector_;
4147 : };
4148 :
4149 0 : void MinorMarkCompactCollector::CollectGarbage() {
4150 : {
4151 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
4152 0 : heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
4153 0 : CleanupSweepToIteratePages();
4154 : }
4155 :
4156 0 : MarkLiveObjects();
4157 0 : ClearNonLiveReferences();
4158 : #ifdef VERIFY_HEAP
4159 : if (FLAG_verify_heap) {
4160 : YoungGenerationMarkingVerifier verifier(heap());
4161 : verifier.Run();
4162 : }
4163 : #endif // VERIFY_HEAP
4164 :
4165 0 : Evacuate();
4166 : #ifdef VERIFY_HEAP
4167 : if (FLAG_verify_heap) {
4168 : YoungGenerationEvacuationVerifier verifier(heap());
4169 : verifier.Run();
4170 : }
4171 : #endif // VERIFY_HEAP
4172 :
4173 : {
4174 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
4175 0 : heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
4176 : }
4177 :
4178 : {
4179 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
4180 0 : for (Page* p :
4181 0 : PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
4182 : DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
4183 0 : non_atomic_marking_state()->ClearLiveness(p);
4184 0 : if (FLAG_concurrent_marking) {
4185 : // Ensure that concurrent marker does not track pages that are
4186 : // going to be unmapped.
4187 0 : heap()->concurrent_marking()->ClearMemoryChunkData(p);
4188 : }
4189 : }
4190 : // Since we promote all surviving large objects immediatelly, all remaining
4191 : // large objects must be dead.
4192 : // TODO(ulan): Don't free all as soon as we have an intermediate generation.
4193 0 : heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
4194 : }
4195 :
4196 : RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
4197 0 : heap(), [](MemoryChunk* chunk) {
4198 0 : if (chunk->SweepingDone()) {
4199 0 : RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
4200 : } else {
4201 0 : RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
4202 : }
4203 0 : });
4204 :
4205 : heap()->account_external_memory_concurrently_freed();
4206 0 : }
4207 :
4208 0 : void MinorMarkCompactCollector::MakeIterable(
4209 0 : Page* p, MarkingTreatmentMode marking_mode,
4210 : FreeSpaceTreatmentMode free_space_mode) {
4211 0 : CHECK(!p->IsLargePage());
4212 : // We have to clear the full collectors markbits for the areas that we
4213 : // remove here.
4214 : MarkCompactCollector* full_collector = heap()->mark_compact_collector();
4215 : Address free_start = p->area_start();
4216 :
4217 0 : for (auto object_and_size :
4218 0 : LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
4219 0 : HeapObject const object = object_and_size.first;
4220 : DCHECK(non_atomic_marking_state()->IsGrey(object));
4221 : Address free_end = object->address();
4222 0 : if (free_end != free_start) {
4223 0 : CHECK_GT(free_end, free_start);
4224 0 : size_t size = static_cast<size_t>(free_end - free_start);
4225 : full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
4226 : p->AddressToMarkbitIndex(free_start),
4227 0 : p->AddressToMarkbitIndex(free_end));
4228 0 : if (free_space_mode == ZAP_FREE_SPACE) {
4229 : ZapCode(free_start, size);
4230 : }
4231 : p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
4232 0 : ClearRecordedSlots::kNo);
4233 : }
4234 0 : Map map = object->synchronized_map();
4235 0 : int size = object->SizeFromMap(map);
4236 0 : free_start = free_end + size;
4237 : }
4238 :
4239 0 : if (free_start != p->area_end()) {
4240 0 : CHECK_GT(p->area_end(), free_start);
4241 0 : size_t size = static_cast<size_t>(p->area_end() - free_start);
4242 : full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
4243 : p->AddressToMarkbitIndex(free_start),
4244 0 : p->AddressToMarkbitIndex(p->area_end()));
4245 0 : if (free_space_mode == ZAP_FREE_SPACE) {
4246 : ZapCode(free_start, size);
4247 : }
4248 : p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
4249 0 : ClearRecordedSlots::kNo);
4250 : }
4251 :
4252 0 : if (marking_mode == MarkingTreatmentMode::CLEAR) {
4253 0 : non_atomic_marking_state()->ClearLiveness(p);
4254 : p->ClearFlag(Page::SWEEP_TO_ITERATE);
4255 : }
4256 0 : }
4257 :
4258 : namespace {
4259 :
4260 : // Helper class for pruning the string table.
4261 0 : class YoungGenerationExternalStringTableCleaner : public RootVisitor {
4262 : public:
4263 : YoungGenerationExternalStringTableCleaner(
4264 : MinorMarkCompactCollector* collector)
4265 0 : : heap_(collector->heap()),
4266 0 : marking_state_(collector->non_atomic_marking_state()) {}
4267 :
4268 0 : void VisitRootPointers(Root root, const char* description,
4269 : FullObjectSlot start, FullObjectSlot end) override {
4270 : DCHECK_EQ(static_cast<int>(root),
4271 : static_cast<int>(Root::kExternalStringsTable));
4272 : // Visit all HeapObject pointers in [start, end).
4273 0 : for (FullObjectSlot p = start; p < end; ++p) {
4274 0 : Object o = *p;
4275 0 : if (o->IsHeapObject()) {
4276 : HeapObject heap_object = HeapObject::cast(o);
4277 0 : if (marking_state_->IsWhite(heap_object)) {
4278 0 : if (o->IsExternalString()) {
4279 0 : heap_->FinalizeExternalString(String::cast(*p));
4280 : } else {
4281 : // The original external string may have been internalized.
4282 : DCHECK(o->IsThinString());
4283 : }
4284 : // Set the entry to the_hole_value (as deleted).
4285 0 : p.store(ReadOnlyRoots(heap_).the_hole_value());
4286 : }
4287 : }
4288 : }
4289 0 : }
4290 :
4291 : private:
4292 : Heap* heap_;
4293 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4294 : };
4295 :
4296 : // Marked young generation objects and all old generation objects will be
4297 : // retained.
4298 0 : class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
4299 : public:
4300 : explicit MinorMarkCompactWeakObjectRetainer(
4301 : MinorMarkCompactCollector* collector)
4302 0 : : marking_state_(collector->non_atomic_marking_state()) {}
4303 :
4304 0 : Object RetainAs(Object object) override {
4305 : HeapObject heap_object = HeapObject::cast(object);
4306 0 : if (!Heap::InYoungGeneration(heap_object)) return object;
4307 :
4308 : // Young generation marking only marks to grey instead of black.
4309 : DCHECK(!marking_state_->IsBlack(heap_object));
4310 0 : if (marking_state_->IsGrey(heap_object)) {
4311 0 : return object;
4312 : }
4313 0 : return Object();
4314 : }
4315 :
4316 : private:
4317 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4318 : };
4319 :
4320 : } // namespace
4321 :
4322 0 : void MinorMarkCompactCollector::ClearNonLiveReferences() {
4323 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
4324 :
4325 : {
4326 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
4327 : // Internalized strings are always stored in old space, so there is no need
4328 : // to clean them here.
4329 : YoungGenerationExternalStringTableCleaner external_visitor(this);
4330 0 : heap()->external_string_table_.IterateYoung(&external_visitor);
4331 0 : heap()->external_string_table_.CleanUpYoung();
4332 : }
4333 :
4334 : {
4335 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
4336 : // Process the weak references.
4337 : MinorMarkCompactWeakObjectRetainer retainer(this);
4338 0 : heap()->ProcessYoungWeakReferences(&retainer);
4339 0 : }
4340 0 : }
4341 :
4342 0 : void MinorMarkCompactCollector::EvacuatePrologue() {
4343 0 : NewSpace* new_space = heap()->new_space();
4344 : // Append the list of new space pages to be processed.
4345 0 : for (Page* p :
4346 0 : PageRange(new_space->first_allocatable_address(), new_space->top())) {
4347 0 : new_space_evacuation_pages_.push_back(p);
4348 : }
4349 0 : new_space->Flip();
4350 0 : new_space->ResetLinearAllocationArea();
4351 :
4352 0 : heap()->new_lo_space()->Flip();
4353 0 : heap()->new_lo_space()->ResetPendingObject();
4354 0 : }
4355 :
4356 0 : void MinorMarkCompactCollector::EvacuateEpilogue() {
4357 0 : heap()->new_space()->set_age_mark(heap()->new_space()->top());
4358 : // Give pages that are queued to be freed back to the OS.
4359 0 : heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
4360 0 : }
4361 :
4362 0 : UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
4363 : MemoryChunk* chunk, Address start, Address end) {
4364 : return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
4365 0 : chunk, start, end, non_atomic_marking_state());
4366 : }
4367 :
4368 0 : UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
4369 : MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
4370 : return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
4371 0 : heap(), non_atomic_marking_state(), chunk, updating_mode);
4372 : }
4373 :
4374 : class MarkingItem;
4375 : class PageMarkingItem;
4376 : class RootMarkingItem;
4377 : class YoungGenerationMarkingTask;
4378 :
4379 0 : class MarkingItem : public ItemParallelJob::Item {
4380 : public:
4381 0 : ~MarkingItem() override = default;
4382 : virtual void Process(YoungGenerationMarkingTask* task) = 0;
4383 : };
4384 :
4385 0 : class YoungGenerationMarkingTask : public ItemParallelJob::Task {
4386 : public:
4387 0 : YoungGenerationMarkingTask(
4388 : Isolate* isolate, MinorMarkCompactCollector* collector,
4389 : MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
4390 : : ItemParallelJob::Task(isolate),
4391 : collector_(collector),
4392 : marking_worklist_(global_worklist, task_id),
4393 0 : marking_state_(collector->marking_state()),
4394 0 : visitor_(marking_state_, global_worklist, task_id) {
4395 0 : local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
4396 0 : Page::kPageSize);
4397 0 : }
4398 :
4399 0 : void RunInParallel() override {
4400 0 : TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
4401 : GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
4402 0 : double marking_time = 0.0;
4403 : {
4404 : TimedScope scope(&marking_time);
4405 : MarkingItem* item = nullptr;
4406 0 : while ((item = GetItem<MarkingItem>()) != nullptr) {
4407 0 : item->Process(this);
4408 0 : item->MarkFinished();
4409 0 : EmptyLocalMarkingWorklist();
4410 : }
4411 0 : EmptyMarkingWorklist();
4412 : DCHECK(marking_worklist_.IsLocalEmpty());
4413 0 : FlushLiveBytes();
4414 : }
4415 0 : if (FLAG_trace_minor_mc_parallel_marking) {
4416 0 : PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
4417 0 : static_cast<void*>(this), marking_time);
4418 0 : }
4419 0 : }
4420 :
4421 0 : void MarkObject(Object object) {
4422 0 : if (!Heap::InYoungGeneration(object)) return;
4423 : HeapObject heap_object = HeapObject::cast(object);
4424 0 : if (marking_state_->WhiteToGrey(heap_object)) {
4425 : const int size = visitor_.Visit(heap_object);
4426 0 : IncrementLiveBytes(heap_object, size);
4427 : }
4428 : }
4429 :
4430 : private:
4431 0 : void EmptyLocalMarkingWorklist() {
4432 0 : HeapObject object;
4433 0 : while (marking_worklist_.Pop(&object)) {
4434 : const int size = visitor_.Visit(object);
4435 0 : IncrementLiveBytes(object, size);
4436 : }
4437 0 : }
4438 :
4439 0 : void EmptyMarkingWorklist() {
4440 0 : HeapObject object;
4441 0 : while (marking_worklist_.Pop(&object)) {
4442 : const int size = visitor_.Visit(object);
4443 0 : IncrementLiveBytes(object, size);
4444 : }
4445 0 : }
4446 :
4447 : void IncrementLiveBytes(HeapObject object, intptr_t bytes) {
4448 0 : local_live_bytes_[Page::FromHeapObject(object)] += bytes;
4449 : }
4450 :
4451 0 : void FlushLiveBytes() {
4452 0 : for (auto pair : local_live_bytes_) {
4453 : marking_state_->IncrementLiveBytes(pair.first, pair.second);
4454 : }
4455 0 : }
4456 :
4457 : MinorMarkCompactCollector* collector_;
4458 : MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
4459 : MinorMarkCompactCollector::MarkingState* marking_state_;
4460 : YoungGenerationMarkingVisitor visitor_;
4461 : std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
4462 : };
4463 :
4464 : class PageMarkingItem : public MarkingItem {
4465 : public:
4466 : explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
4467 0 : : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
4468 0 : ~PageMarkingItem() override { *global_slots_ = *global_slots_ + slots_; }
4469 :
4470 0 : void Process(YoungGenerationMarkingTask* task) override {
4471 0 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4472 : "PageMarkingItem::Process");
4473 0 : base::MutexGuard guard(chunk_->mutex());
4474 : MarkUntypedPointers(task);
4475 0 : MarkTypedPointers(task);
4476 0 : }
4477 :
4478 : private:
4479 0 : inline Heap* heap() { return chunk_->heap(); }
4480 :
4481 : void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
4482 : RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
4483 0 : [this, task](MaybeObjectSlot slot) {
4484 0 : return CheckAndMarkObject(task, slot);
4485 0 : },
4486 0 : SlotSet::PREFREE_EMPTY_BUCKETS);
4487 : }
4488 :
4489 : void MarkTypedPointers(YoungGenerationMarkingTask* task) {
4490 : RememberedSet<OLD_TO_NEW>::IterateTyped(
4491 0 : chunk_, [=](SlotType slot_type, Address slot) {
4492 : return UpdateTypedSlotHelper::UpdateTypedSlot(
4493 0 : heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
4494 0 : return CheckAndMarkObject(task, slot);
4495 0 : });
4496 0 : });
4497 : }
4498 :
4499 : template <typename TSlot>
4500 : V8_INLINE SlotCallbackResult
4501 : CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
4502 : static_assert(
4503 : std::is_same<TSlot, FullMaybeObjectSlot>::value ||
4504 : std::is_same<TSlot, MaybeObjectSlot>::value,
4505 : "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
4506 0 : MaybeObject object = *slot;
4507 0 : if (Heap::InYoungGeneration(object)) {
4508 : // Marking happens before flipping the young generation, so the object
4509 : // has to be in a to page.
4510 : DCHECK(Heap::InToPage(object));
4511 0 : HeapObject heap_object;
4512 0 : bool success = object.GetHeapObject(&heap_object);
4513 : USE(success);
4514 : DCHECK(success);
4515 0 : task->MarkObject(heap_object);
4516 0 : slots_++;
4517 : return KEEP_SLOT;
4518 : }
4519 : return REMOVE_SLOT;
4520 : }
4521 :
4522 : MemoryChunk* chunk_;
4523 : std::atomic<int>* global_slots_;
4524 : int slots_;
4525 : };
4526 :
4527 0 : void MinorMarkCompactCollector::MarkRootSetInParallel(
4528 0 : RootMarkingVisitor* root_visitor) {
4529 : std::atomic<int> slots;
4530 : {
4531 : ItemParallelJob job(isolate()->cancelable_task_manager(),
4532 0 : &page_parallel_job_semaphore_);
4533 :
4534 : // Seed the root set (roots + old->new set).
4535 : {
4536 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
4537 : isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
4538 0 : &JSObject::IsUnmodifiedApiObject);
4539 0 : heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
4540 : // Create items for each page.
4541 : RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
4542 0 : heap(), [&job, &slots](MemoryChunk* chunk) {
4543 0 : job.AddItem(new PageMarkingItem(chunk, &slots));
4544 0 : });
4545 : }
4546 :
4547 : // Add tasks and run in parallel.
4548 : {
4549 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
4550 : const int new_space_pages =
4551 0 : static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
4552 0 : const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
4553 0 : for (int i = 0; i < num_tasks; i++) {
4554 : job.AddTask(
4555 0 : new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
4556 : }
4557 0 : job.Run();
4558 0 : DCHECK(worklist()->IsEmpty());
4559 0 : }
4560 : }
4561 0 : old_to_new_slots_ = slots;
4562 0 : }
4563 :
4564 0 : void MinorMarkCompactCollector::MarkLiveObjects() {
4565 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
4566 :
4567 : PostponeInterruptsScope postpone(isolate());
4568 :
4569 : RootMarkingVisitor root_visitor(this);
4570 :
4571 0 : MarkRootSetInParallel(&root_visitor);
4572 :
4573 : // Mark rest on the main thread.
4574 : {
4575 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
4576 0 : ProcessMarkingWorklist();
4577 : }
4578 :
4579 : {
4580 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
4581 : isolate()->global_handles()->MarkYoungWeakUnmodifiedObjectsPending(
4582 0 : &IsUnmarkedObjectForYoungGeneration);
4583 : isolate()->global_handles()->IterateYoungWeakUnmodifiedRootsForFinalizers(
4584 0 : &root_visitor);
4585 : isolate()
4586 : ->global_handles()
4587 : ->IterateYoungWeakUnmodifiedRootsForPhantomHandles(
4588 0 : &root_visitor, &IsUnmarkedObjectForYoungGeneration);
4589 0 : ProcessMarkingWorklist();
4590 0 : }
4591 0 : }
4592 :
4593 0 : void MinorMarkCompactCollector::ProcessMarkingWorklist() {
4594 : MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
4595 0 : HeapObject object;
4596 0 : while (marking_worklist.Pop(&object)) {
4597 : DCHECK(!object->IsFiller());
4598 : DCHECK(object->IsHeapObject());
4599 : DCHECK(heap()->Contains(object));
4600 : DCHECK(non_atomic_marking_state()->IsGrey(object));
4601 : main_marking_visitor()->Visit(object);
4602 : }
4603 : DCHECK(marking_worklist.IsLocalEmpty());
4604 0 : }
4605 :
4606 0 : void MinorMarkCompactCollector::Evacuate() {
4607 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
4608 0 : base::MutexGuard guard(heap()->relocation_mutex());
4609 :
4610 : {
4611 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
4612 0 : EvacuatePrologue();
4613 : }
4614 :
4615 : {
4616 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
4617 0 : EvacuatePagesInParallel();
4618 : }
4619 :
4620 0 : UpdatePointersAfterEvacuation();
4621 :
4622 : {
4623 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
4624 0 : if (!heap()->new_space()->Rebalance()) {
4625 0 : heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
4626 0 : }
4627 : }
4628 :
4629 : {
4630 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
4631 0 : for (Page* p : new_space_evacuation_pages_) {
4632 0 : if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
4633 : p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
4634 : p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
4635 : p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
4636 : p->SetFlag(Page::SWEEP_TO_ITERATE);
4637 0 : sweep_to_iterate_pages_.push_back(p);
4638 : }
4639 : }
4640 0 : new_space_evacuation_pages_.clear();
4641 : }
4642 :
4643 : {
4644 0 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
4645 0 : EvacuateEpilogue();
4646 0 : }
4647 0 : }
4648 :
4649 : namespace {
4650 :
4651 0 : class YoungGenerationEvacuator : public Evacuator {
4652 : public:
4653 : YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
4654 : RecordMigratedSlotVisitor* record_visitor)
4655 0 : : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
4656 :
4657 0 : GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
4658 0 : return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
4659 : }
4660 :
4661 : protected:
4662 : void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
4663 :
4664 : MinorMarkCompactCollector* collector_;
4665 : };
4666 :
4667 0 : void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
4668 : intptr_t* live_bytes) {
4669 0 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4670 : "YoungGenerationEvacuator::RawEvacuatePage");
4671 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
4672 0 : collector_->non_atomic_marking_state();
4673 0 : *live_bytes = marking_state->live_bytes(chunk);
4674 0 : switch (ComputeEvacuationMode(chunk)) {
4675 : case kObjectsNewToOld:
4676 : LiveObjectVisitor::VisitGreyObjectsNoFail(
4677 : chunk, marking_state, &new_space_visitor_,
4678 0 : LiveObjectVisitor::kClearMarkbits);
4679 : // ArrayBufferTracker will be updated during pointers updating.
4680 0 : break;
4681 : case kPageNewToOld:
4682 : LiveObjectVisitor::VisitGreyObjectsNoFail(
4683 : chunk, marking_state, &new_to_old_page_visitor_,
4684 0 : LiveObjectVisitor::kKeepMarking);
4685 : new_to_old_page_visitor_.account_moved_bytes(
4686 : marking_state->live_bytes(chunk));
4687 0 : if (!chunk->IsLargePage()) {
4688 : // TODO(mlippautz): If cleaning array buffers is too slow here we can
4689 : // delay it until the next GC.
4690 0 : ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
4691 0 : if (heap()->ShouldZapGarbage()) {
4692 : collector_->MakeIterable(static_cast<Page*>(chunk),
4693 : MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
4694 0 : } else if (heap()->incremental_marking()->IsMarking()) {
4695 : // When incremental marking is on, we need to clear the mark bits of
4696 : // the full collector. We cannot yet discard the young generation mark
4697 : // bits as they are still relevant for pointers updating.
4698 : collector_->MakeIterable(static_cast<Page*>(chunk),
4699 : MarkingTreatmentMode::KEEP,
4700 0 : IGNORE_FREE_SPACE);
4701 : }
4702 : }
4703 : break;
4704 : case kPageNewToNew:
4705 : LiveObjectVisitor::VisitGreyObjectsNoFail(
4706 : chunk, marking_state, &new_to_new_page_visitor_,
4707 0 : LiveObjectVisitor::kKeepMarking);
4708 : new_to_new_page_visitor_.account_moved_bytes(
4709 : marking_state->live_bytes(chunk));
4710 : DCHECK(!chunk->IsLargePage());
4711 : // TODO(mlippautz): If cleaning array buffers is too slow here we can
4712 : // delay it until the next GC.
4713 0 : ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
4714 : if (heap()->ShouldZapGarbage()) {
4715 : collector_->MakeIterable(static_cast<Page*>(chunk),
4716 : MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
4717 0 : } else if (heap()->incremental_marking()->IsMarking()) {
4718 : // When incremental marking is on, we need to clear the mark bits of
4719 : // the full collector. We cannot yet discard the young generation mark
4720 : // bits as they are still relevant for pointers updating.
4721 : collector_->MakeIterable(static_cast<Page*>(chunk),
4722 0 : MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
4723 : }
4724 : break;
4725 : case kObjectsOldToOld:
4726 0 : UNREACHABLE();
4727 : break;
4728 0 : }
4729 0 : }
4730 :
4731 : } // namespace
4732 :
4733 0 : void MinorMarkCompactCollector::EvacuatePagesInParallel() {
4734 : ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
4735 0 : &page_parallel_job_semaphore_);
4736 : intptr_t live_bytes = 0;
4737 :
4738 0 : for (Page* page : new_space_evacuation_pages_) {
4739 : intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
4740 0 : if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
4741 0 : live_bytes += live_bytes_on_page;
4742 0 : if (ShouldMovePage(page, live_bytes_on_page)) {
4743 0 : if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
4744 0 : EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
4745 : } else {
4746 0 : EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
4747 : }
4748 : }
4749 0 : evacuation_job.AddItem(new EvacuationItem(page));
4750 : }
4751 :
4752 : // Promote young generation large objects.
4753 0 : for (auto it = heap()->new_lo_space()->begin();
4754 : it != heap()->new_lo_space()->end();) {
4755 : LargePage* current = *it;
4756 : it++;
4757 : HeapObject object = current->GetObject();
4758 : DCHECK(!non_atomic_marking_state_.IsBlack(object));
4759 0 : if (non_atomic_marking_state_.IsGrey(object)) {
4760 0 : heap_->lo_space()->PromoteNewLargeObject(current);
4761 : current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
4762 0 : evacuation_job.AddItem(new EvacuationItem(current));
4763 : }
4764 : }
4765 0 : if (evacuation_job.NumberOfItems() == 0) return;
4766 :
4767 : YoungGenerationMigrationObserver observer(heap(),
4768 : heap()->mark_compact_collector());
4769 : YoungGenerationRecordMigratedSlotVisitor record_visitor(
4770 : heap()->mark_compact_collector());
4771 : CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
4772 0 : this, &evacuation_job, &record_visitor, &observer, live_bytes);
4773 : }
4774 :
4775 0 : int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
4776 : ItemParallelJob* job) {
4777 : int pages = 0;
4778 0 : for (Page* p : new_space_evacuation_pages_) {
4779 0 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
4780 0 : if (p->local_tracker() == nullptr) continue;
4781 :
4782 0 : pages++;
4783 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
4784 0 : p, ArrayBufferTrackerUpdatingItem::kRegular));
4785 : }
4786 : }
4787 0 : return pages;
4788 : }
4789 :
4790 : #endif // ENABLE_MINOR_MC
4791 :
4792 : } // namespace internal
4793 178779 : } // namespace v8
|