Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/mark-compact.h"
6 :
7 : #include <unordered_map>
8 :
9 : #include "src/cancelable-task.h"
10 : #include "src/code-stubs.h"
11 : #include "src/compilation-cache.h"
12 : #include "src/deoptimizer.h"
13 : #include "src/execution.h"
14 : #include "src/frames-inl.h"
15 : #include "src/global-handles.h"
16 : #include "src/heap/array-buffer-tracker-inl.h"
17 : #include "src/heap/concurrent-marking.h"
18 : #include "src/heap/gc-tracer.h"
19 : #include "src/heap/incremental-marking.h"
20 : #include "src/heap/invalidated-slots-inl.h"
21 : #include "src/heap/item-parallel-job.h"
22 : #include "src/heap/local-allocator.h"
23 : #include "src/heap/mark-compact-inl.h"
24 : #include "src/heap/object-stats.h"
25 : #include "src/heap/objects-visiting-inl.h"
26 : #include "src/heap/spaces-inl.h"
27 : #include "src/heap/worklist.h"
28 : #include "src/ic/stub-cache.h"
29 : #include "src/transitions-inl.h"
30 : #include "src/utils-inl.h"
31 : #include "src/v8.h"
32 :
33 : namespace v8 {
34 : namespace internal {
35 :
// Human-readable forms of the 2-bit marking states. White = unmarked,
// black = marked and scanned, grey = marked but not yet scanned. These are
// cross-checked against the actual encoding in MarkCompactCollector::SetUp.
const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";

// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
44 :
45 : // =============================================================================
46 : // Verifiers
47 : // =============================================================================
48 :
49 : #ifdef VERIFY_HEAP
50 : namespace {
51 :
// Abstract verifier for marking consistency: walks roots and marked objects
// and checks that everything reachable is itself marked. Subclasses plug in
// the concrete marking state (full vs. young-generation collector).
class MarkingVerifier : public ObjectVisitor, public RootVisitor {
 public:
  virtual void Run() = 0;

 protected:
  explicit MarkingVerifier(Heap* heap) : heap_(heap) {}

  // Marking bitmap of |chunk| in the subclass's marking state.
  virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;

  // Checks every slot in [start, end); semantics depend on the subclass.
  virtual void VerifyPointers(Object** start, Object** end) = 0;

  virtual bool IsMarked(HeapObject* object) = 0;

  virtual bool IsBlackOrGrey(HeapObject* object) = 0;

  // Both visitor interfaces funnel into VerifyPointers.
  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

  void VerifyRoots(VisitMode mode);
  void VerifyMarkingOnPage(const Page* page, Address start, Address end);
  void VerifyMarking(NewSpace* new_space);
  void VerifyMarking(PagedSpace* paged_space);

  Heap* heap_;
};
82 :
// Visits all strong roots; each root slot is checked via VerifyPointers.
void MarkingVerifier::VerifyRoots(VisitMode mode) {
  heap_->IterateStrongRoots(this, mode);
}
86 :
// Walks the object sequence on [start, end) of |page|. For every marked
// object it verifies the object's slots, checks that objects do not overlap,
// and checks that the marking bitmap over the object's extent is consistent:
// either fully set (black-allocation area / black object) or clear beyond
// the object's first two mark bits.
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
                                          Address end) {
  HeapObject* object;
  Address next_object_must_be_here_or_later = start;
  for (Address current = start; current < end;) {
    object = HeapObject::FromAddress(current);
    // One word fillers at the end of a black area can be grey.
    if (IsBlackOrGrey(object) &&
        object->map() != heap_->one_pointer_filler_map()) {
      CHECK(IsMarked(object));
      // A marked object must not start inside the previous object's extent.
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(this);
      next_object_must_be_here_or_later = current + object->Size();
      // The object is either part of a black area of black allocation or a
      // regular black object
      CHECK(
          bitmap(page)->AllBitsSetInRange(
              page->AddressToMarkbitIndex(current),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
          bitmap(page)->AllBitsClearInRange(
              page->AddressToMarkbitIndex(current + kPointerSize * 2),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
      current = next_object_must_be_here_or_later;
    } else {
      // Unmarked word (or one-word filler): advance one word and retry.
      current += kPointerSize;
    }
  }
}
115 :
// Verifies marking on all new-space pages from the space's bottom up to the
// current allocation top.
void MarkingVerifier::VerifyMarking(NewSpace* space) {
  Address end = space->top();
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());

  PageRange range(space->bottom(), end);
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    // Only the final page in the range is bounded by |end| (the allocation
    // top); all earlier pages are verified in full.
    Address limit = it != range.end() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarkingOnPage(page, page->area_start(), limit);
  }
}
130 :
131 : void MarkingVerifier::VerifyMarking(PagedSpace* space) {
132 : for (Page* p : *space) {
133 : VerifyMarkingOnPage(p, p->area_start(), p->area_end());
134 : }
135 : }
136 :
// Marking verifier for the full (major) collector: checks all spaces,
// including large objects, against the major non-atomic marking state.
class FullMarkingVerifier : public MarkingVerifier {
 public:
  explicit FullMarkingVerifier(Heap* heap)
      : MarkingVerifier(heap),
        marking_state_(
            heap->mark_compact_collector()->non_atomic_marking_state()) {}

  void Run() override {
    VerifyRoots(VISIT_ONLY_STRONG);
    VerifyMarking(heap_->new_space());
    VerifyMarking(heap_->old_space());
    VerifyMarking(heap_->code_space());
    VerifyMarking(heap_->map_space());

    // Large objects are not covered by VerifyMarking above; iterate the
    // marked ones directly.
    LargeObjectIterator it(heap_->lo_space());
    for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
      if (marking_state_->IsBlackOrGrey(obj)) {
        obj->Iterate(this);
      }
    }
  }

 protected:
  Bitmap* bitmap(const MemoryChunk* chunk) override {
    return marking_state_->bitmap(chunk);
  }

  // For the full collector "marked" means black.
  bool IsMarked(HeapObject* object) override {
    return marking_state_->IsBlack(object);
  }

  bool IsBlackOrGrey(HeapObject* object) override {
    return marking_state_->IsBlackOrGrey(object);
  }

  // Every heap object referenced from a verified slot must be marked.
  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(marking_state_->IsBlackOrGrey(object));
      }
    }
  }

  // Embedded pointers in code objects are verified unless the code holds
  // the target weakly.
  void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!host->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(host, &p);
    }
  }

 private:
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
192 :
193 : class YoungGenerationMarkingVerifier : public MarkingVerifier {
194 : public:
195 : explicit YoungGenerationMarkingVerifier(Heap* heap)
196 : : MarkingVerifier(heap),
197 : marking_state_(
198 : heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
199 :
200 : Bitmap* bitmap(const MemoryChunk* chunk) override {
201 : return marking_state_->bitmap(chunk);
202 : }
203 :
204 : bool IsMarked(HeapObject* object) override {
205 : return marking_state_->IsGrey(object);
206 : }
207 :
208 : bool IsBlackOrGrey(HeapObject* object) override {
209 : return marking_state_->IsBlackOrGrey(object);
210 : }
211 :
212 : void Run() override {
213 : VerifyRoots(VISIT_ALL_IN_SCAVENGE);
214 : VerifyMarking(heap_->new_space());
215 : }
216 :
217 : void VerifyPointers(Object** start, Object** end) override {
218 : for (Object** current = start; current < end; current++) {
219 : if ((*current)->IsHeapObject()) {
220 : HeapObject* object = HeapObject::cast(*current);
221 : if (!heap_->InNewSpace(object)) return;
222 : CHECK(IsMarked(object));
223 : }
224 : }
225 : }
226 :
227 : private:
228 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
229 : };
230 :
// Abstract verifier run after evacuation: checks that no live object still
// points into evacuated memory (from-space or evacuation candidates,
// depending on the subclass).
class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
 public:
  virtual void Run() = 0;

  // Both visitor interfaces funnel into VerifyPointers.
  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

 protected:
  explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}

  inline Heap* heap() { return heap_; }

  // Checks every slot in [start, end); semantics depend on the subclass.
  virtual void VerifyPointers(Object** start, Object** end) = 0;

  void VerifyRoots(VisitMode mode);
  void VerifyEvacuationOnPage(Address start, Address end);
  void VerifyEvacuation(NewSpace* new_space);
  void VerifyEvacuation(PagedSpace* paged_space);

  Heap* heap_;
};
257 :
// Visits all strong roots; each root slot is checked via VerifyPointers.
void EvacuationVerifier::VerifyRoots(VisitMode mode) {
  heap_->IterateStrongRoots(this, mode);
}
261 :
262 : void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
263 : Address current = start;
264 : while (current < end) {
265 : HeapObject* object = HeapObject::FromAddress(current);
266 : if (!object->IsFiller()) object->Iterate(this);
267 : current += object->Size();
268 : }
269 : }
270 :
// Verifies all new-space pages from the space's bottom up to the current
// allocation top.
void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
  PageRange range(space->bottom(), space->top());
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address current = page->area_start();
    // Only the final page is bounded by the allocation top.
    Address limit = it != range.end() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    VerifyEvacuationOnPage(current, limit);
  }
}
281 :
// Verifies every non-candidate page of a paged space. The page holding the
// linear allocation area first gets a filler placed over the unallocated
// tail so the whole page iterates as a contiguous object sequence.
void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
  for (Page* p : *space) {
    if (p->IsEvacuationCandidate()) continue;
    if (p->Contains(space->top()))
      heap_->CreateFillerObjectAt(
          space->top(), static_cast<int>(space->limit() - space->top()),
          ClearRecordedSlots::kNo);

    VerifyEvacuationOnPage(p->area_start(), p->area_end());
  }
}
293 :
294 : class FullEvacuationVerifier : public EvacuationVerifier {
295 : public:
296 : explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
297 :
298 : void Run() override {
299 : VerifyRoots(VISIT_ALL);
300 : VerifyEvacuation(heap_->new_space());
301 : VerifyEvacuation(heap_->old_space());
302 : VerifyEvacuation(heap_->code_space());
303 : VerifyEvacuation(heap_->map_space());
304 : }
305 :
306 : protected:
307 : void VerifyPointers(Object** start, Object** end) override {
308 : for (Object** current = start; current < end; current++) {
309 : if ((*current)->IsHeapObject()) {
310 : HeapObject* object = HeapObject::cast(*current);
311 : if (heap()->InNewSpace(object)) {
312 : CHECK(heap()->InToSpace(object));
313 : }
314 : CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
315 : }
316 : }
317 : }
318 : };
319 :
// Evacuation verifier for young-generation GCs: after evacuation no pointer
// anywhere may reference new-space from-space.
class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
 public:
  explicit YoungGenerationEvacuationVerifier(Heap* heap)
      : EvacuationVerifier(heap) {}

  void Run() override {
    VerifyRoots(VISIT_ALL_IN_SCAVENGE);
    VerifyEvacuation(heap_->new_space());
    VerifyEvacuation(heap_->old_space());
    VerifyEvacuation(heap_->code_space());
    VerifyEvacuation(heap_->map_space());
  }

 protected:
  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        // Any surviving new-space object must live in to-space.
        CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
      }
    }
  }
};
343 :
344 : } // namespace
345 : #endif // VERIFY_HEAP
346 :
347 : // =============================================================================
348 : // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
349 : // =============================================================================
350 :
// Visitor used by the full collector's atomic marking pause: regular fixed
// array visitation, retaining-path tracing enabled, atomic marking state.
using MarkCompactMarkingVisitor =
    MarkingVisitor<FixedArrayVisitationMode::kRegular,
                   TraceRetainingPathMode::kEnabled, MajorAtomicMarkingState>;
354 :
355 : namespace {
356 :
357 : // This root visitor walks all roots and creates items bundling objects that
358 : // are then processed later on. Slots have to be dereferenced as they could
359 : // live on the native (C++) stack, which requires filtering out the indirection.
360 : template <class BatchedItem>
361 0 : class RootMarkingVisitorSeedOnly : public RootVisitor {
362 : public:
363 0 : explicit RootMarkingVisitorSeedOnly(ItemParallelJob* job) : job_(job) {
364 0 : buffered_objects_.reserve(kBufferSize);
365 : }
366 :
367 0 : void VisitRootPointer(Root root, Object** p) override {
368 0 : if (!(*p)->IsHeapObject()) return;
369 0 : AddObject(*p);
370 : }
371 :
372 0 : void VisitRootPointers(Root root, Object** start, Object** end) override {
373 0 : for (Object** p = start; p < end; p++) {
374 0 : if (!(*p)->IsHeapObject()) continue;
375 0 : AddObject(*p);
376 : }
377 0 : }
378 :
379 0 : void FlushObjects() {
380 0 : job_->AddItem(new BatchedItem(std::move(buffered_objects_)));
381 : // Moving leaves the container in a valid but unspecified state. Reusing the
382 : // container requires a call without precondition that resets the state.
383 : buffered_objects_.clear();
384 0 : buffered_objects_.reserve(kBufferSize);
385 0 : }
386 :
387 : private:
388 : // Bundling several objects together in items avoids issues with allocating
389 : // and deallocating items; both are operations that are performed on the main
390 : // thread.
391 : static const int kBufferSize = 128;
392 :
393 0 : void AddObject(Object* object) {
394 0 : buffered_objects_.push_back(object);
395 0 : if (buffered_objects_.size() == kBufferSize) FlushObjects();
396 0 : }
397 :
398 : ItemParallelJob* job_;
399 : std::vector<Object*> buffered_objects_;
400 : };
401 :
402 : } // namespace
403 :
// Number of background threads the platform offers, clamped to at least 1.
static int NumberOfAvailableCores() {
  return Max(
      1, static_cast<int>(
             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
}
409 :
// One task per available core, never more tasks than pages, and a single
// task when parallel compaction is disabled.
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
  DCHECK_GT(pages, 0);
  return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
}
414 :
// Chooses the number of pointer-update tasks, bounded by cores, page count,
// a per-task slot budget, and a hard cap. |slots| may be negative when the
// slot count is unknown; then only the page count limits the task count.
int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
                                                                 int slots) {
  DCHECK_GT(pages, 0);
  // Limit the number of update tasks as task creation often dominates the
  // actual work that is being done.
  const int kMaxPointerUpdateTasks = 8;
  const int kSlotsPerTask = 600;
  const int wanted_tasks =
      (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
  return FLAG_parallel_pointer_update
             ? Min(kMaxPointerUpdateTasks,
                   Min(NumberOfAvailableCores(), wanted_tasks))
             : 1;
}
429 :
// One task per core up to the number of to-space pages; a single task when
// parallel pointer updating is disabled.
int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
    int pages) {
  DCHECK_GT(pages, 0);
  // No cap needed because all pages we need to process are fully filled with
  // interesting objects.
  return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
                                      : 1;
}
438 :
// Estimates the number of minor-GC marking tasks from the page count,
// bounded by available cores and the fixed marker limit.
int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
  DCHECK_GT(pages, 0);
  if (!FLAG_minor_mc_parallel_marking) return 1;
  // Pages are not private to markers but we can still use them to estimate the
  // amount of marking that is required.
  const int kPagesPerTask = 2;
  const int wanted_tasks = Max(1, pages / kPagesPerTask);
  return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
}
448 :
// Constructs the full (major) mark-compact collector. All phase flags start
// false; old_to_new_slots_ is -1 until the first slot count is recorded.
MarkCompactCollector::MarkCompactCollector(Heap* heap)
    : MarkCompactCollectorBase(heap),
      page_parallel_job_semaphore_(0),
#ifdef DEBUG
      state_(IDLE),
#endif
      was_marked_incrementally_(false),
      evacuation_(false),
      compacting_(false),
      black_allocation_(false),
      have_code_to_deoptimize_(false),
      marking_worklist_(heap),
      sweeper_(heap, non_atomic_marking_state()) {
  old_to_new_slots_ = -1;
}
464 :
// One-time setup. Only sanity-checks that the documented bit-pattern
// strings match the encoding declared at the top of this file.
void MarkCompactCollector::SetUp() {
  DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
  DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
  DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
  DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}
471 :
472 54999 : void MinorMarkCompactCollector::SetUp() {}
473 :
// Shuts the collector down: abandons any in-progress compaction and weak
// object processing, and drops the marking worklist if incremental marking
// is still active.
void MarkCompactCollector::TearDown() {
  AbortCompaction();
  AbortWeakObjects();
  if (heap()->incremental_marking()->IsMarking()) {
    marking_worklist()->Clear();
  }
}
481 :
482 53365 : void MinorMarkCompactCollector::TearDown() {}
483 :
// Flags |p| as an evacuation candidate and records it for the evacuation
// phase. Pages marked NeverEvacuate must never reach this point.
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.push_back(p);
}
489 :
490 :
491 0 : static void TraceFragmentation(PagedSpace* space) {
492 0 : int number_of_pages = space->CountTotalPages();
493 0 : intptr_t reserved = (number_of_pages * space->AreaSize());
494 0 : intptr_t free = reserved - space->SizeOfObjects();
495 : PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
496 : AllocationSpaceName(space->identity()), number_of_pages,
497 0 : static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
498 0 : }
499 :
// Selects evacuation candidates unless compaction was already started:
// old space always; code space only with --compact-code-space (otherwise
// optionally traced). Returns whether any candidates exist.
bool MarkCompactCollector::StartCompaction() {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.empty());

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    compacting_ = !evacuation_candidates_.empty();
  }

  return compacting_;
}
521 :
// Runs one full mark-compact GC cycle: marking, reference clearing,
// (optional) verification, stats, sweeping, evacuation, and finalization.
void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();

  MarkLiveObjects();
  ClearNonLiveReferences();
  VerifyMarking();

  RecordObjectStats();

  StartSweepSpaces();

  Evacuate();

  Finish();
}
541 :
542 : #ifdef VERIFY_HEAP
// Checks that every page of |space| has a clean marking bitmap and zero
// recorded live bytes.
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  for (Page* p : *space) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}
549 :
550 :
// Checks that all used new-space pages (bottom to allocation top) have a
// clean marking bitmap and zero recorded live bytes.
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  for (Page* p : PageRange(space->bottom(), space->top())) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}
557 :
558 :
// Checks that all spaces, including large objects, carry no marking state.
void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  // Large objects: each must be white with no live bytes on its chunk.
  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    CHECK(non_atomic_marking_state()->IsWhite(obj));
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
                    MemoryChunk::FromAddress(obj->address())));
  }
}
572 :
// Verifies the embedded-object dependencies of every optimized code object
// that is not about to be deoptimized.
void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next(); obj != nullptr;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}
583 :
584 : #endif // VERIFY_HEAP
585 :
586 2736 : void MarkCompactCollector::ClearMarkbitsInPagedSpace(PagedSpace* space) {
587 18758 : for (Page* p : *space) {
588 : non_atomic_marking_state()->ClearLiveness(p);
589 : }
590 2736 : }
591 :
// Resets mark bits and live-byte counters on every new-space page.
void MarkCompactCollector::ClearMarkbitsInNewSpace(NewSpace* space) {
  for (Page* p : *space) {
    non_atomic_marking_state()->ClearLiveness(p);
  }
}
597 :
598 :
// Clears all marking state heap-wide: every paged space, new space, and the
// large-object space.
void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_space());
  ClearMarkbitsInNewSpace(heap_->new_space());
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
}
606 :
// Pauses concurrent sweeper tasks for the lifetime of the scope. If there is
// no remaining sweeping work, sweeping is completed immediately instead of
// being resumed when the scope ends.
MarkCompactCollector::Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(
    MarkCompactCollector::Sweeper* sweeper)
    : sweeper_(sweeper) {
  sweeper_->stop_sweeper_tasks_.SetValue(true);
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->AbortAndWaitForTasks();

  // Complete sweeping if there's nothing more to do.
  if (sweeper_->IsDoneSweeping()) {
    sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
    DCHECK(!sweeper_->sweeping_in_progress());
  } else {
    // Unless sweeping is complete the flag still indicates that the sweeper
    // is enabled. It just cannot use tasks anymore.
    DCHECK(sweeper_->sweeping_in_progress());
  }
}
625 :
// Re-enables sweeper tasks and restarts them if sweeping is still pending.
MarkCompactCollector::Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
  sweeper_->stop_sweeper_tasks_.SetValue(false);
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->StartSweeperTasks();
}
632 :
// Background task that sweeps the paged spaces. Each task begins with a
// different space (|space_to_start_|) and then rotates through the rest, so
// concurrent tasks spread across spaces; on completion it decrements the
// task counter and signals the semaphore the main thread waits on.
class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
 public:
  SweeperTask(Isolate* isolate, Sweeper* sweeper,
              base::Semaphore* pending_sweeper_tasks,
              base::AtomicNumber<intptr_t>* num_sweeping_tasks,
              AllocationSpace space_to_start)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        num_sweeping_tasks_(num_sweeping_tasks),
        space_to_start_(space_to_start) {}

  virtual ~SweeperTask() {}

 private:
  void RunInternal() final {
    DCHECK_GE(space_to_start_, FIRST_SPACE);
    DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
    const int offset = space_to_start_ - FIRST_SPACE;
    const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
    // Round-robin over all paged spaces, starting at space_to_start_.
    for (int i = 0; i < num_spaces; i++) {
      const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
      DCHECK_GE(space_id, FIRST_SPACE);
      DCHECK_LE(space_id, LAST_PAGED_SPACE);
      sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
    }
    num_sweeping_tasks_->Decrement(1);
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_sweeper_tasks_;
  base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
  AllocationSpace space_to_start_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
670 :
// Enters sweeping mode and sorts each space's sweeping list so pages with
// the fewest live bytes (i.e. most reclaimable memory) are swept first.
void MarkCompactCollector::Sweeper::StartSweeping() {
  CHECK(!stop_sweeper_tasks_.Value());
  sweeping_in_progress_ = true;
  NonAtomicMarkingState* marking_state =
      heap_->mark_compact_collector()->non_atomic_marking_state();
  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
    std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
              [marking_state](Page* a, Page* b) {
                return marking_state->live_bytes(a) <
                       marking_state->live_bytes(b);
              });
  });
}
684 :
// Spawns one background SweeperTask per paged space (new space is swept on
// the main thread). No-op unless concurrent sweeping is enabled, sweeping
// is in progress, and tasks are not delayed for testing.
void MarkCompactCollector::Sweeper::StartSweeperTasks() {
  DCHECK_EQ(0, num_tasks_);
  DCHECK_EQ(0, num_sweeping_tasks_.Value());
  if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
      !heap_->delay_sweeper_tasks_for_testing_) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      if (space == NEW_SPACE) return;
      num_sweeping_tasks_.Increment(1);
      SweeperTask* task = new SweeperTask(heap_->isolate(), this,
                                          &pending_sweeper_tasks_semaphore_,
                                          &num_sweeping_tasks_, space);
      DCHECK_LT(num_tasks_, kMaxSweeperTasks);
      // Remember the id so the task can be aborted later.
      task_ids_[num_tasks_++] = task->id();
      V8::GetCurrentPlatform()->CallOnBackgroundThread(
          task, v8::Platform::kShortRunningTask);
    });
  }
}
703 :
// Ensures |page| is swept: tries to sweep it on the current thread, and if a
// concurrent sweeper already owns it, blocks until that sweeper finishes.
void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
    Page* page) {
  if (!page->SweepingDone()) {
    ParallelSweepPage(page, page->owner()->identity());
    if (!page->SweepingDone()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}
716 :
717 684469 : Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
718 684469 : base::LockGuard<base::Mutex> guard(&mutex_);
719 684490 : SweptList& list = swept_list_[space->identity()];
720 684490 : if (!list.empty()) {
721 422852 : auto last_page = list.back();
722 : list.pop_back();
723 422852 : return last_page;
724 : }
725 : return nullptr;
726 : }
727 :
// Cancels all pending sweeper tasks; tasks that already started cannot be
// aborted and are waited for instead. On return no sweeper task is running.
void MarkCompactCollector::Sweeper::AbortAndWaitForTasks() {
  if (!FLAG_concurrent_sweeping) return;

  for (int i = 0; i < num_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        CancelableTaskManager::kTaskAborted) {
      // The task is already running; wait for its completion signal.
      pending_sweeper_tasks_semaphore_.Wait();
    } else {
      // Aborted case.
      num_sweeping_tasks_.Decrement(1);
    }
  }
  num_tasks_ = 0;
  DCHECK_EQ(0, num_sweeping_tasks_.Value());
}
743 :
// Finishes all outstanding sweeping on the main thread (helping the
// background tasks), waits for/aborts the tasks, and leaves sweeping mode.
void MarkCompactCollector::Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  ForAllSweepingSpaces(
      [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });

  AbortAndWaitForTasks();

  ForAllSweepingSpaces([this](AllocationSpace space) {
    if (space == NEW_SPACE) {
      swept_list_[NEW_SPACE].clear();
    }
    DCHECK(sweeping_list_[space].empty());
  });
  sweeping_in_progress_ = false;
}
762 :
// Ensures all new-space pages are swept (new-space sweeping never runs in
// background tasks, so this is done inline on the main thread).
void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
  if (!sweeping_in_progress_) return;
  // NOTE(review): after the early return above, sweeping_in_progress() is
  // necessarily true, so this condition always holds — confirm whether the
  // FLAG_concurrent_sweeping clause is intentional.
  if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
    for (Page* p : *heap_->new_space()) {
      SweepOrWaitUntilSweepingCompleted(p);
    }
  }
}
771 :
// Completes any in-progress sweeping, refills the free lists of the paged
// spaces with the newly swept memory, optionally verifies evacuation, and
// releases memory chunks whose unmapping was delayed until sweeping ended.
void MarkCompactCollector::EnsureSweepingCompleted() {
  if (!sweeper().sweeping_in_progress()) return;

  sweeper().EnsureCompleted();
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    FullEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif

  if (heap()->memory_allocator()->unmapper()->has_delayed_chunks())
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
790 :
// True while any background sweeper task has not yet finished.
bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
  return num_sweeping_tasks_.Value() != 0;
}
794 :
// Computes the candidate-selection parameters for compaction: the page
// fragmentation threshold (in percent of the page area that must be free)
// and the global cap on bytes to evacuate. Memory-reducing modes use fixed
// aggressive values; otherwise the threshold adapts to the measured
// compaction speed once enough samples exist.
void MarkCompactCollector::ComputeEvacuationHeuristics(
    size_t area_size, int* target_fragmentation_percent,
    size_t* max_evacuated_bytes) {
  // For memory reducing and optimize for memory mode we directly define both
  // constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
  const int kTargetFragmentationPercentForOptimizeMemory = 20;
  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const size_t kMaxEvacuatedBytes = 4 * MB;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const float kTargetMsPerArea = .5;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else if (heap()->ShouldOptimizeForMemoryUsage()) {
    *target_fragmentation_percent =
        kTargetFragmentationPercentForOptimizeMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
  } else {
    const double estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const double estimated_ms_per_area =
          1 + area_size / estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
      // Never become less aggressive than the memory-reducing threshold.
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      // No speed samples yet; start with the conservative default.
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}
842 :
843 :
 : // Selects which pages of |space| (old space or code space) will be
 : // evacuated (compacted) during this GC cycle. Three selection modes:
 : // explicit test-forced pages, --stress-compaction (every second page),
 : // or the fragmentation/quota heuristic from ComputeEvacuationHeuristics.
844 232416 : void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
845 : DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
846 :
847 116208 : int number_of_pages = space->CountTotalPages();
848 116208 : size_t area_size = space->AreaSize();
849 :
850 : // Pairs of (live_bytes_in_page, page).
851 : typedef std::pair<size_t, Page*> LiveBytesPagePair;
852 : std::vector<LiveBytesPagePair> pages;
853 116208 : pages.reserve(number_of_pages);
854 :
855 : DCHECK(!sweeping_in_progress());
 : // The page that holds the current linear allocation area must never be
 : // evacuated, as the mutator may still bump-allocate into it.
856 : Page* owner_of_linear_allocation_area =
857 : space->top() == space->limit()
858 : ? nullptr
859 116208 : : Page::FromAllocationAreaAddress(space->top());
860 845098 : for (Page* p : *space) {
861 487595 : if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
862 : !p->CanAllocate())
863 : continue;
864 : // Invariant: Evacuation candidates are just created when marking is
865 : // started. This means that sweeping has finished. Furthermore, at the end
866 : // of a GC all evacuation candidates are cleared and their slot buffers are
867 : // released.
868 90627 : CHECK(!p->IsEvacuationCandidate());
869 90627 : CHECK_NULL(p->slot_set<OLD_TO_OLD>());
870 90627 : CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
871 90627 : CHECK(p->SweepingDone());
872 : DCHECK(p->area_size() == area_size);
873 181254 : pages.push_back(std::make_pair(p->allocated_bytes(), p));
874 : }
875 :
876 : int candidate_count = 0;
877 : size_t total_live_bytes = 0;
878 :
879 116208 : const bool reduce_memory = heap()->ShouldReduceMemory();
 : // Mode 1: tests force specific pages via a page flag.
880 116208 : if (FLAG_manual_evacuation_candidates_selection) {
881 800 : for (size_t i = 0; i < pages.size(); i++) {
882 246 : Page* p = pages[i].second;
883 246 : if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
884 106 : candidate_count++;
885 106 : total_live_bytes += pages[i].first;
886 : p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
887 : AddEvacuationCandidate(p);
888 : }
889 : }
 : // Mode 2: stress testing evacuates every second collected page.
890 115900 : } else if (FLAG_stress_compaction) {
891 4262 : for (size_t i = 0; i < pages.size(); i++) {
892 1383 : Page* p = pages[i].second;
893 1383 : if (i % 2 == 0) {
894 887 : candidate_count++;
895 887 : total_live_bytes += pages[i].first;
896 : AddEvacuationCandidate(p);
897 : }
898 : }
 : // Mode 3: the production heuristic.
899 : } else {
900 : // The following approach determines the pages that should be evacuated.
901 : //
902 : // We use two conditions to decide whether a page qualifies as an evacuation
903 : // candidate, or not:
904 : // * Target fragmentation: How fragmented is a page, i.e., how is the ratio
905 : // between live bytes and capacity of this page (= area).
906 : // * Evacuation quota: A global quota determining how much bytes should be
907 : // compacted.
908 : //
909 : // The algorithm sorts all pages by live bytes and then iterates through
910 : // them starting with the page with the most free memory, adding them to the
911 : // set of evacuation candidates as long as both conditions (fragmentation
912 : // and quota) hold.
913 : size_t max_evacuated_bytes;
914 : int target_fragmentation_percent;
915 : ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
916 114404 : &max_evacuated_bytes);
917 :
918 : const size_t free_bytes_threshold =
919 114404 : target_fragmentation_percent * (area_size / 100);
920 :
921 : // Sort pages from the most free to the least free, then select
922 : // the first n pages for evacuation such that:
923 : // - the total size of evacuated objects does not exceed the specified
924 : // limit.
925 : // - fragmentation of (n+1)-th page does not exceed the specified limit.
926 : std::sort(pages.begin(), pages.end(),
927 : [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
928 : return a.first < b.first;
929 114404 : });
930 406804 : for (size_t i = 0; i < pages.size(); i++) {
931 88998 : size_t live_bytes = pages[i].first;
932 : DCHECK_GE(area_size, live_bytes);
933 88998 : size_t free_bytes = area_size - live_bytes;
934 88998 : if (FLAG_always_compact ||
935 30123 : ((free_bytes >= free_bytes_threshold) &&
936 30123 : ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
937 30182 : candidate_count++;
938 30182 : total_live_bytes += live_bytes;
939 : }
940 88998 : if (FLAG_trace_fragmentation_verbose) {
941 : PrintIsolate(isolate(),
942 : "compaction-selection-page: space=%s free_bytes_page=%zu "
943 : "fragmentation_limit_kb=%" PRIuS
944 : " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
945 : "compaction_limit_kb=%zu\n",
946 : AllocationSpaceName(space->identity()), free_bytes / KB,
947 : free_bytes_threshold / KB, target_fragmentation_percent,
948 0 : total_live_bytes / KB, max_evacuated_bytes / KB);
949 : }
950 : }
951 : // How many pages we will allocated for the evacuated objects
952 : // in the worst case: ceil(total_live_bytes / area_size)
953 : int estimated_new_pages =
954 114404 : static_cast<int>((total_live_bytes + area_size - 1) / area_size);
955 : DCHECK_LE(estimated_new_pages, candidate_count);
956 : int estimated_released_pages = candidate_count - estimated_new_pages;
957 : // Avoid (compact -> expand) cycles.
958 114404 : if ((estimated_released_pages == 0) && !FLAG_always_compact) {
959 : candidate_count = 0;
960 : }
 : // Candidates were sorted most-free first, so the first
 : // |candidate_count| entries are the cheapest pages to evacuate.
961 117195 : for (int i = 0; i < candidate_count; i++) {
962 5582 : AddEvacuationCandidate(pages[i].second);
963 : }
964 : }
965 :
966 116208 : if (FLAG_trace_fragmentation) {
967 : PrintIsolate(isolate(),
968 : "compaction-selection: space=%s reduce_memory=%d pages=%d "
969 : "total_live_bytes=%zu\n",
970 : AllocationSpaceName(space->identity()), reduce_memory,
971 0 : candidate_count, total_live_bytes / KB);
972 : }
973 116208 : }
974 :
975 :
 : // Cancels an in-progress compaction: drops all OLD_TO_OLD remembered-set
 : // entries and clears the candidate flag on every selected page. Safe to
 : // call when no compaction is active (the body is guarded by compacting_).
976 54277 : void MarkCompactCollector::AbortCompaction() {
977 54277 : if (compacting_) {
978 9 : RememberedSet<OLD_TO_OLD>::ClearAll(heap());
979 52 : for (Page* p : evacuation_candidates_) {
980 : p->ClearEvacuationCandidate();
981 : }
982 9 : compacting_ = false;
983 : evacuation_candidates_.clear();
984 : }
985 : DCHECK(evacuation_candidates_.empty());
986 54277 : }
987 :
988 :
 : // Prepares the heap for a full mark-compact GC: finishes outstanding
 : // sweeping/unmapping work, aborts an incremental marking cycle if the
 : // heap requests it, starts compaction when no incremental cycle is being
 : // finalized, and readies each paged space for collection.
989 56800 : void MarkCompactCollector::Prepare() {
990 384794 : was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
991 :
992 : #ifdef DEBUG
993 : DCHECK(state_ == IDLE);
994 : state_ = PREPARE_GC;
995 : #endif
996 :
997 : DCHECK(!FLAG_never_compact || !FLAG_always_compact);
998 :
999 : // Instead of waiting we could also abort the sweeper threads here.
1000 56800 : EnsureSweepingCompleted();
1001 :
1002 113600 : if (heap()->incremental_marking()->IsSweeping()) {
1003 2434 : heap()->incremental_marking()->Stop();
1004 : }
1005 :
1006 : // If concurrent unmapping tasks are still running, we should wait for
1007 : // them here.
1008 56800 : heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
1009 :
1010 : // Clear marking bits if incremental marking is aborted.
1011 115424 : if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
1012 912 : heap()->incremental_marking()->Stop();
1013 912 : heap()->incremental_marking()->AbortBlackAllocation();
1014 912 : FinishConcurrentMarking();
1015 912 : heap()->incremental_marking()->Deactivate();
1016 912 : ClearMarkbits();
1017 912 : AbortWeakCollections();
1018 : AbortWeakObjects();
1019 912 : AbortCompaction();
1020 1824 : heap_->local_embedder_heap_tracer()->AbortTracing();
1021 912 : marking_worklist()->Clear();
 : // Marking state was discarded above, so this GC proceeds as a
 : // non-incremental collection from here on.
1022 912 : was_marked_incrementally_ = false;
1023 : }
1024 :
1025 56800 : if (!was_marked_incrementally_) {
1026 165032 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
1027 123774 : heap_->local_embedder_heap_tracer()->TracePrologue();
1028 : }
1029 :
1030 : // Don't start compaction if we are in the middle of incremental
1031 : // marking cycle. We did not collect any slots.
1032 56800 : if (!FLAG_never_compact && !was_marked_incrementally_) {
1033 41258 : StartCompaction();
1034 : }
1035 :
1036 : PagedSpaces spaces(heap());
1037 227200 : for (PagedSpace* space = spaces.next(); space != nullptr;
1038 : space = spaces.next()) {
1039 170400 : space->PrepareForMarkCompact();
1040 : }
1041 56800 : heap()->account_external_memory_concurrently_freed();
1042 :
1043 : #ifdef VERIFY_HEAP
1044 : if (!was_marked_incrementally_ && FLAG_verify_heap) {
1045 : VerifyMarkbitsAreClean();
1046 : }
1047 : #endif
1048 56800 : }
1049 :
 : // Waits for concurrent marker tasks to finish and folds their per-task
 : // live-byte counts into the main-thread (non-atomic) marking state.
 : // No-op unless --concurrent-marking is enabled.
1050 57712 : void MarkCompactCollector::FinishConcurrentMarking() {
1051 57712 : if (FLAG_concurrent_marking) {
1052 114136 : heap()->concurrent_marking()->EnsureCompleted();
1053 114136 : heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
1054 : }
1055 57712 : }
1056 :
 : // Debug-only sanity checks run after marking: the worklist must be
 : // drained, incremental marking stopped, and (under VERIFY_HEAP) the
 : // marking bitmap and per-space live-byte counters must be consistent.
1057 56800 : void MarkCompactCollector::VerifyMarking() {
1058 56800 : CHECK(marking_worklist()->IsEmpty());
1059 : DCHECK(heap_->incremental_marking()->IsStopped());
1060 : #ifdef VERIFY_HEAP
1061 : if (FLAG_verify_heap) {
1062 : FullMarkingVerifier verifier(heap());
1063 : verifier.Run();
1064 : }
1065 : #endif
1066 : #ifdef VERIFY_HEAP
1067 : heap()->old_space()->VerifyLiveBytes();
1068 : heap()->map_space()->VerifyLiveBytes();
1069 : heap()->code_space()->VerifyLiveBytes();
1070 : #endif
1071 56800 : }
1072 :
 : // Epilogue of a full GC: kicks off background sweeping, invalidates
 : // caches whose contents depend on object addresses, and deoptimizes any
 : // code that was marked for deoptimization during the collection.
1073 56800 : void MarkCompactCollector::Finish() {
1074 284000 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
1075 :
1076 : #ifdef DEBUG
1077 : heap()->VerifyCountersBeforeConcurrentSweeping();
1078 : #endif
1079 :
1080 56800 : sweeper().StartSweeperTasks();
1081 :
1082 : // The hashing of weak_object_to_code_table is no longer valid.
1083 56800 : heap()->weak_object_to_code_table()->Rehash();
1084 :
1085 : // Clear the marking state of live large objects.
1086 56800 : heap_->lo_space()->ClearMarkingStateOfLiveObjects();
1087 :
1088 : #ifdef DEBUG
1089 : DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
1090 : state_ = IDLE;
1091 : #endif
 : // Cached inner-pointer lookups may reference moved code objects.
1092 113600 : heap_->isolate()->inner_pointer_to_code_cache()->Flush();
1093 :
1094 : // The stub caches are not traversed during GC; clear them to force
1095 : // their lazy re-initialization. This must be done after the
1096 : // GC, because it relies on the new address of certain old space
1097 : // objects (empty string, illegal builtin).
1098 56800 : isolate()->load_stub_cache()->Clear();
1099 56800 : isolate()->store_stub_cache()->Clear();
1100 :
1101 56800 : if (have_code_to_deoptimize_) {
1102 : // Some code objects were marked for deoptimization during the GC.
1103 934 : Deoptimizer::DeoptimizeMarkedCode(isolate());
1104 934 : have_code_to_deoptimize_ = false;
1105 56800 : }
1106 56800 : }
1107 :
 : // Clears the SWEEP_TO_ITERATE flag (and the associated liveness data)
 : // from pages the minor collector had kept iterable, then forgets them.
1108 56800 : void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
1109 113600 : for (Page* p : sweep_to_iterate_pages_) {
1110 0 : if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
1111 : p->ClearFlag(Page::SWEEP_TO_ITERATE);
1112 : non_atomic_marking_state()->ClearLiveness(p);
1113 : }
1114 : }
1115 : sweep_to_iterate_pages_.clear();
1116 56800 : }
1117 :
 : // Root visitor for full mark-compact marking: forwards every root slot
 : // that holds a HeapObject to MarkCompactCollector::MarkRootObject.
 : // Non-heap values (Smis) are skipped.
1118 56800 : class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
1119 : public:
1120 : explicit RootMarkingVisitor(MarkCompactCollector* collector)
1121 56800 : : collector_(collector) {}
1122 :
1123 168761934 : void VisitRootPointer(Root root, Object** p) final {
1124 : MarkObjectByPointer(root, p);
1125 168762351 : }
1126 :
1127 860133 : void VisitRootPointers(Root root, Object** start, Object** end) final {
1128 77909124 : for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
1129 860112 : }
1130 :
1131 : private:
1132 : V8_INLINE void MarkObjectByPointer(Root root, Object** p) {
1133 491621892 : if (!(*p)->IsHeapObject()) return;
1134 :
1135 243170901 : collector_->MarkRootObject(root, HeapObject::cast(*p));
1136 : }
1137 :
1138 : MarkCompactCollector* const collector_;
1139 : };
1140 :
1141 : // This visitor is used to visit the body of special objects held alive by
1142 : // other roots.
1143 : //
1144 : // It is currently used for
1145 : // - Code held alive by the top optimized frame. This code cannot be deoptimized
1146 : // and thus have to be kept alive in an isolate way, i.e., it should not keep
1147 : // alive other code objects reachable through the weak list but they should
1148 : // keep alive its embedded pointers (which would otherwise be dropped).
1149 : // - Prefix of the string table.
 : // See the block comment above for the use cases of this visitor.
 : // Marks objects referenced from the bodies of specially-treated roots,
 : // deliberately skipping the weak next-code link so that visiting a code
 : // object does not keep the rest of the weak code list alive.
1150 56800 : class MarkCompactCollector::CustomRootBodyMarkingVisitor final
1151 : : public ObjectVisitor {
1152 : public:
1153 : explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
1154 56800 : : collector_(collector) {}
1155 :
1156 9334 : void VisitPointer(HeapObject* host, Object** p) final {
1157 9334 : MarkObject(host, *p);
1158 9334 : }
1159 :
1160 57145 : void VisitPointers(HeapObject* host, Object** start, Object** end) final {
1161 57145 : for (Object** p = start; p < end; p++) MarkObject(host, *p);
1162 57145 : }
1163 :
1164 : // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
1165 :
1166 : // Skip the weak next code link in a code object.
1167 387 : void VisitNextCodeLink(Code* host, Object** p) override {}
1168 :
1169 : private:
1170 295059 : void MarkObject(HeapObject* host, Object* object) {
 : // Smis carry no references and need no marking.
1171 590118 : if (!object->IsHeapObject()) return;
1172 67640 : collector_->MarkObject(host, HeapObject::cast(object));
1173 : }
1174 :
1175 : MarkCompactCollector* const collector_;
1176 : };
1177 :
 : // Prunes the internalized string table after marking: unmarked (white)
 : // entries are replaced with the-hole and counted; surviving entries get
 : // an updated slot record so compaction can relocate them.
1178 56800 : class InternalizedStringTableCleaner : public ObjectVisitor {
1179 : public:
1180 : InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
1181 56800 : : heap_(heap), pointers_removed_(0), table_(table) {}
1182 :
1183 56800 : void VisitPointers(HeapObject* host, Object** start, Object** end) override {
1184 : // Visit all HeapObject pointers in [start, end).
1185 56800 : Object* the_hole = heap_->the_hole_value();
1186 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
1187 : heap_->mark_compact_collector()->non_atomic_marking_state();
1188 242273760 : for (Object** p = start; p < end; p++) {
1189 242216960 : Object* o = *p;
1190 242216960 : if (o->IsHeapObject()) {
1191 : HeapObject* heap_object = HeapObject::cast(o);
1192 242216960 : if (marking_state->IsWhite(heap_object)) {
1193 5206100 : pointers_removed_++;
1194 : // Set the entry to the_hole_value (as deleted).
1195 5206100 : *p = the_hole;
1196 : } else {
1197 : // StringTable contains only old space strings.
1198 : DCHECK(!heap_->InNewSpace(o));
1199 237010860 : MarkCompactCollector::RecordSlot(table_, p, o);
1200 : }
1201 : }
1202 : }
1203 56800 : }
1204 :
 : // Number of entries cleared; used by the caller to shrink the table.
1205 : int PointersRemoved() {
1206 : return pointers_removed_;
1207 : }
1208 :
1209 : private:
1210 : Heap* heap_;
1211 : int pointers_removed_;
1212 : HeapObject* table_;
1213 : };
1214 :
 : // Prunes the external string table after full-GC marking: finalizes
 : // dead (white) external strings, releasing their external payloads,
 : // and replaces the table entries with the-hole.
1215 56800 : class ExternalStringTableCleaner : public RootVisitor {
1216 : public:
1217 56800 : explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
1218 :
1219 56941 : void VisitRootPointers(Root root, Object** start, Object** end) override {
1220 : // Visit all HeapObject pointers in [start, end).
1221 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
1222 56941 : heap_->mark_compact_collector()->non_atomic_marking_state();
1223 56941 : Object* the_hole = heap_->the_hole_value();
1224 682707 : for (Object** p = start; p < end; p++) {
1225 625766 : Object* o = *p;
1226 625766 : if (o->IsHeapObject()) {
1227 : HeapObject* heap_object = HeapObject::cast(o);
1228 625766 : if (marking_state->IsWhite(heap_object)) {
1229 5891 : if (o->IsExternalString()) {
1230 5891 : heap_->FinalizeExternalString(String::cast(*p));
1231 : } else {
1232 : // The original external string may have been internalized.
1233 : DCHECK(o->IsThinString());
1234 : }
1235 : // Set the entry to the_hole_value (as deleted).
1236 5891 : *p = the_hole;
1237 : }
1238 : }
1239 : }
1240 56941 : }
1241 :
1242 : private:
1243 : Heap* heap_;
1244 : };
1245 :
1246 : // Helper class for pruning the string table.
 : // Young-generation counterpart of ExternalStringTableCleaner: uses the
 : // minor collector's marking state to decide which external strings are
 : // dead and finalizes them. (Coverage shows this path unexercised here.)
1247 0 : class YoungGenerationExternalStringTableCleaner : public RootVisitor {
1248 : public:
1249 : YoungGenerationExternalStringTableCleaner(
1250 : MinorMarkCompactCollector* collector)
1251 0 : : heap_(collector->heap()),
1252 0 : marking_state_(collector->non_atomic_marking_state()) {}
1253 :
1254 0 : void VisitRootPointers(Root root, Object** start, Object** end) override {
1255 : DCHECK_EQ(static_cast<int>(root),
1256 : static_cast<int>(Root::kExternalStringsTable));
1257 : // Visit all HeapObject pointers in [start, end).
1258 0 : for (Object** p = start; p < end; p++) {
1259 0 : Object* o = *p;
1260 0 : if (o->IsHeapObject()) {
1261 : HeapObject* heap_object = HeapObject::cast(o);
1262 0 : if (marking_state_->IsWhite(heap_object)) {
1263 0 : if (o->IsExternalString()) {
1264 0 : heap_->FinalizeExternalString(String::cast(*p));
1265 : } else {
1266 : // The original external string may have been internalized.
1267 : DCHECK(o->IsThinString());
1268 : }
1269 : // Set the entry to the_hole_value (as deleted).
1270 0 : *p = heap_->the_hole_value();
1271 : }
1272 : }
1273 : }
1274 0 : }
1275 :
1276 : private:
1277 : Heap* heap_;
1278 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
1279 : };
1280 :
1281 : // Marked young generation objects and all old generation objects will be
1282 : // retained.
 : // Weak-list retainer for the minor collector: everything outside new
 : // space survives unconditionally; new-space objects survive only if the
 : // young-generation marker reached them (grey, since minor marking does
 : // not blacken).
1283 0 : class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1284 : public:
1285 : explicit MinorMarkCompactWeakObjectRetainer(
1286 : MinorMarkCompactCollector* collector)
1287 0 : : heap_(collector->heap()),
1288 0 : marking_state_(collector->non_atomic_marking_state()) {}
1289 :
1290 0 : virtual Object* RetainAs(Object* object) {
1291 : HeapObject* heap_object = HeapObject::cast(object);
1292 0 : if (!heap_->InNewSpace(heap_object)) return object;
1293 :
1294 : // Young generation marking only marks to grey instead of black.
1295 : DCHECK(!marking_state_->IsBlack(heap_object));
1296 0 : if (marking_state_->IsGrey(heap_object)) {
1297 0 : return object;
1298 : }
 : // Unmarked young object: drop it from the weak list.
1299 : return nullptr;
1300 : }
1301 :
1302 : private:
1303 : Heap* heap_;
1304 : MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
1305 : };
1306 :
1307 : // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1308 : // are retained.
 : // Weak-list retainer for full GCs: black (marked) objects are retained;
 : // unmarked AllocationSites get a one-time zombie reprieve so allocation
 : // feedback can still be scavenged from new space; everything else dies.
1309 56800 : class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1310 : public:
1311 : explicit MarkCompactWeakObjectRetainer(
1312 : MarkCompactCollector::NonAtomicMarkingState* marking_state)
1313 56800 : : marking_state_(marking_state) {}
1314 :
1315 908611 : virtual Object* RetainAs(Object* object) {
1316 : HeapObject* heap_object = HeapObject::cast(object);
1317 : DCHECK(!marking_state_->IsGrey(heap_object));
1318 908611 : if (marking_state_->IsBlack(heap_object)) {
1319 : return object;
1320 454891 : } else if (object->IsAllocationSite() &&
1321 : !(AllocationSite::cast(object)->IsZombie())) {
1322 : // "dead" AllocationSites need to live long enough for a traversal of new
1323 : // space. These sites get a one-time reprieve.
1324 : AllocationSite* site = AllocationSite::cast(object);
1325 : site->MarkZombie();
 : // Blacken the site so it is treated as live for the rest of this GC.
1326 : marking_state_->WhiteToBlack(site);
1327 93131 : return object;
1328 : } else {
1329 : return nullptr;
1330 : }
1331 : }
1332 :
1333 : private:
1334 : MarkCompactCollector::NonAtomicMarkingState* marking_state_;
1335 : };
1336 :
 : // Visits the body of a freshly migrated (evacuated) object and re-records
 : // its outgoing pointers into the proper remembered sets: OLD_TO_NEW for
 : // pointers into new space, OLD_TO_OLD for pointers into evacuation
 : // candidate pages. Reloc-info slots in code objects are recorded as
 : // typed slots via RecordRelocSlot.
1337 47003 : class RecordMigratedSlotVisitor : public ObjectVisitor {
1338 : public:
1339 : explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1340 47061 : : collector_(collector) {}
1341 :
1342 0 : inline void VisitPointer(HeapObject* host, Object** p) final {
1343 202485 : RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
1344 0 : }
1345 :
1346 1251 : inline void VisitPointers(HeapObject* host, Object** start,
1347 : Object** end) final {
1348 105684057 : while (start < end) {
1349 96082350 : RecordMigratedSlot(host, *start, reinterpret_cast<Address>(start));
1350 96086916 : ++start;
1351 : }
1352 1251 : }
1353 :
1354 2982 : inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
1355 : DCHECK_EQ(host, rinfo->host());
1356 : DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
1357 2982 : Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1358 : // The target is always in old space, we don't have to record the slot in
1359 : // the old-to-new remembered set.
1360 : DCHECK(!collector_->heap()->InNewSpace(target));
1361 2982 : collector_->RecordRelocSlot(host, rinfo, target);
1362 2982 : }
1363 :
1364 695 : inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
1365 : DCHECK_EQ(host, rinfo->host());
1366 : DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1367 : HeapObject* object = HeapObject::cast(rinfo->target_object());
1368 695 : collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
1369 695 : collector_->RecordRelocSlot(host, rinfo, object);
1370 695 : }
1371 :
1372 : // Entries that are skipped for recording.
1373 0 : inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
1374 0 : inline void VisitExternalReference(Foreign* host, Address* p) final {}
1375 0 : inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
1376 0 : inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
1377 :
1378 : protected:
 : // Virtual so the young-generation subclass can add a host-liveness check.
1379 96223085 : inline virtual void RecordMigratedSlot(HeapObject* host, Object* value,
1380 : Address slot) {
1381 96223085 : if (value->IsHeapObject()) {
1382 : Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1383 153853778 : if (p->InNewSpace()) {
1384 : DCHECK_IMPLIES(p->InToSpace(),
1385 : p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
1386 : RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
1387 3617457 : Page::FromAddress(slot), slot);
1388 73309432 : } else if (p->IsEvacuationCandidate()) {
1389 : RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1390 3629141 : Page::FromAddress(slot), slot);
1391 : }
1392 : }
1393 96223855 : }
1394 :
1395 : MarkCompactCollector* collector_;
1396 : };
1397 :
 : // Interface for observers notified whenever an object is migrated during
 : // evacuation; Move() receives the destination space, old and new object
 : // addresses, and the object size.
1398 : class MigrationObserver {
1399 : public:
1400 47003 : explicit MigrationObserver(Heap* heap) : heap_(heap) {}
1401 :
1402 0 : virtual ~MigrationObserver() {}
1403 : virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
1404 : int size) = 0;
1405 :
1406 : protected:
1407 : Heap* heap_;
1408 : };
1409 :
 : // Migration observer that feeds the profiler: emits CodeMoveEvents for
 : // moved code/bytecode and a generic OnMoveEvent for every migration.
1410 0 : class ProfilingMigrationObserver final : public MigrationObserver {
1411 : public:
1412 47003 : explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
1413 :
1414 264880 : inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
1415 : int size) final {
1416 423501 : if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
1417 384 : PROFILE(heap_->isolate(),
1418 : CodeMoveEvent(AbstractCode::cast(src), dst->address()));
1419 : }
1420 264626 : heap_->OnMoveEvent(dst, src, size);
1421 264369 : }
1422 : };
1423 :
 : // Migration observer used during scavenges that run while incremental
 : // (full) marking is active: transfers the object's mark color from the
 : // old location to the new one so the full marker does not lose it.
1424 0 : class YoungGenerationMigrationObserver final : public MigrationObserver {
1425 : public:
1426 : YoungGenerationMigrationObserver(Heap* heap,
1427 : MarkCompactCollector* mark_compact_collector)
1428 : : MigrationObserver(heap),
1429 0 : mark_compact_collector_(mark_compact_collector) {}
1430 :
1431 0 : inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
1432 : int size) final {
1433 : // Migrate color to old generation marking in case the object survived young
1434 : // generation garbage collection.
1435 0 : if (heap_->incremental_marking()->IsMarking()) {
1436 : DCHECK(
1437 : heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
1438 : heap_->incremental_marking()->TransferColor(src, dst);
1439 : }
1440 0 : }
1441 :
1442 : protected:
1443 : base::Mutex mutex_;
1444 : MarkCompactCollector* mark_compact_collector_;
1445 : };
1446 :
 : // Young-generation variant of RecordMigratedSlotVisitor: records an
 : // OLD_TO_OLD slot only when the full collector considers the host
 : // object live (black). Code reloc visits are unreachable here because
 : // the minor collector never migrates code objects.
1447 0 : class YoungGenerationRecordMigratedSlotVisitor final
1448 : : public RecordMigratedSlotVisitor {
1449 : public:
1450 : explicit YoungGenerationRecordMigratedSlotVisitor(
1451 : MarkCompactCollector* collector)
1452 0 : : RecordMigratedSlotVisitor(collector) {}
1453 :
1454 0 : void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
1455 0 : void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
1456 0 : UNREACHABLE();
1457 : }
1458 :
1459 : private:
1460 : // Only record slots for host objects that are considered as live by the full
1461 : // collector.
1462 0 : inline bool IsLive(HeapObject* object) {
1463 0 : return collector_->non_atomic_marking_state()->IsBlack(object);
1464 : }
1465 :
1466 0 : inline void RecordMigratedSlot(HeapObject* host, Object* value,
1467 : Address slot) final {
1468 0 : if (value->IsHeapObject()) {
1469 : Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1470 0 : if (p->InNewSpace()) {
1471 : DCHECK_IMPLIES(p->InToSpace(),
1472 : p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
1473 : RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
1474 0 : Page::FromAddress(slot), slot);
1475 0 : } else if (p->IsEvacuationCandidate() && IsLive(host)) {
1476 : RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1477 0 : Page::FromAddress(slot), slot);
1478 : }
1479 : }
1480 0 : }
1481 : };
1482 :
 : // Abstract per-object visitor used by the evacuation phase; Visit is
 : // called with each live object and its size and returns success.
1483 224276 : class HeapObjectVisitor {
1484 : public:
1485 224276 : virtual ~HeapObjectVisitor() {}
1486 : virtual bool Visit(HeapObject* object, int size) = 0;
1487 : };
1488 :
 : // Shared machinery for evacuating objects: allocates a destination,
 : // copies the object, records migrated slots, and publishes a forwarding
 : // pointer in the old object's first word. Observer notification is
 : // compiled in via a template parameter so the common (unobserved) path
 : // pays no overhead.
1489 112124 : class EvacuateVisitorBase : public HeapObjectVisitor {
1490 : public:
 : // Registering any observer switches migration to the kObserved variant.
1491 : void AddObserver(MigrationObserver* observer) {
1492 512 : migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
1493 512 : observers_.push_back(observer);
1494 : }
1495 :
1496 : protected:
1497 : enum MigrationMode { kFast, kObserved };
1498 :
1499 : typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
1500 : HeapObject* src, int size,
1501 : AllocationSpace dest);
1502 :
 : // Copies |src| to |dst| in |dest| space, revisits the copy to re-record
 : // outgoing slots (old/code space), and installs the forwarding address.
1503 : template <MigrationMode mode>
1504 29554718 : static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
1505 : HeapObject* src, int size,
1506 : AllocationSpace dest) {
1507 29554718 : Address dst_addr = dst->address();
1508 29554718 : Address src_addr = src->address();
1509 : DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
1510 : DCHECK(dest != LO_SPACE);
1511 29554718 : if (dest == OLD_SPACE) {
1512 : DCHECK_OBJECT_SIZE(size);
1513 : DCHECK(IsAligned(size, kPointerSize));
1514 : base->heap_->CopyBlock(dst_addr, src_addr, size);
1515 : if (mode != MigrationMode::kFast)
1516 : base->ExecuteMigrationObservers(dest, src, dst, size);
1517 : dst->IterateBodyFast(dst->map()->instance_type(), size,
1518 12216527 : base->record_visitor_);
1519 17371173 : } else if (dest == CODE_SPACE) {
1520 : DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
1521 : base->heap_->CopyBlock(dst_addr, src_addr, size);
 : // Code must be relocated to fix up PC-relative/embedded addresses.
1522 1193 : Code::cast(dst)->Relocate(dst_addr - src_addr);
1523 : if (mode != MigrationMode::kFast)
1524 : base->ExecuteMigrationObservers(dest, src, dst, size);
1525 : dst->IterateBodyFast(dst->map()->instance_type(), size,
1526 1193 : base->record_visitor_);
1527 : } else {
1528 : DCHECK_OBJECT_SIZE(size);
1529 : DCHECK(dest == NEW_SPACE);
1530 : base->heap_->CopyBlock(dst_addr, src_addr, size);
1531 : if (mode != MigrationMode::kFast)
1532 : base->ExecuteMigrationObservers(dest, src, dst, size);
1533 : }
 : // Publish the forwarding pointer in the old object's map word slot.
1534 : base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
1535 : reinterpret_cast<base::AtomicWord>(dst_addr));
1536 29591036 : }
1537 :
1538 : EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
1539 : RecordMigratedSlotVisitor* record_visitor)
1540 : : heap_(heap),
1541 : local_allocator_(local_allocator),
1542 112124 : record_visitor_(record_visitor) {
1543 112124 : migration_function_ = RawMigrateObject<MigrationMode::kFast>;
1544 : }
1545 :
 : // Returns false when allocation in |target_space| fails (or when the
 : // stress-testing hook deliberately aborts this object's evacuation).
1546 12193758 : inline bool TryEvacuateObject(AllocationSpace target_space,
1547 : HeapObject* object, int size,
1548 : HeapObject** target_object) {
1549 : #ifdef VERIFY_HEAP
1550 : if (AbortCompactionForTesting(object)) return false;
1551 : #endif // VERIFY_HEAP
1552 : AllocationAlignment alignment = object->RequiredAlignment();
1553 : AllocationResult allocation =
1554 12193758 : local_allocator_->Allocate(target_space, size, alignment);
1555 12210321 : if (allocation.To(target_object)) {
1556 12212426 : MigrateObject(*target_object, object, size, target_space);
1557 12210203 : return true;
1558 : }
1559 : return false;
1560 : }
1561 :
1562 : inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
1563 : HeapObject* dst, int size) {
1564 795513 : for (MigrationObserver* obs : observers_) {
1565 265346 : obs->Move(dest, src, dst, size);
1566 : }
1567 : }
1568 :
1569 : inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1570 : AllocationSpace dest) {
1571 29590221 : migration_function_(this, dst, src, size, dest);
1572 : }
1573 :
1574 : #ifdef VERIFY_HEAP
 : // Under --stress-compaction, aborts evacuation of one object per page
 : // (chosen by a seed-derived address mask) to exercise abort handling.
1575 : bool AbortCompactionForTesting(HeapObject* object) {
1576 : if (FLAG_stress_compaction) {
1577 : const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1578 : Page::kPageAlignmentMask & ~kPointerAlignmentMask;
1579 : if ((reinterpret_cast<uintptr_t>(object->address()) &
1580 : Page::kPageAlignmentMask) == mask) {
1581 : Page* page = Page::FromAddress(object->address());
1582 : if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1583 : page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1584 : } else {
1585 : page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1586 : return true;
1587 : }
1588 : }
1589 : }
1590 : return false;
1591 : }
1592 : #endif // VERIFY_HEAP
1593 :
1594 : Heap* heap_;
1595 : LocalAllocator* local_allocator_;
1596 : RecordMigratedSlotVisitor* record_visitor_;
1597 : std::vector<MigrationObserver*> observers_;
1598 : MigrateFunction migration_function_;
1599 : };
1600 :
 : // Evacuates live new-space objects: promotes objects past the promotion
 : // threshold into old space, otherwise copies them within the semispaces
 : // (falling back to old space if new space is full) while accumulating
 : // pretenuring feedback and promoted/copied byte counters.
1601 112124 : class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
1602 : public:
1603 56062 : explicit EvacuateNewSpaceVisitor(
1604 : Heap* heap, LocalAllocator* local_allocator,
1605 : RecordMigratedSlotVisitor* record_visitor,
1606 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1607 : : EvacuateVisitorBase(heap, local_allocator, record_visitor),
1608 : buffer_(LocalAllocationBuffer::InvalidBuffer()),
1609 : promoted_size_(0),
1610 : semispace_copied_size_(0),
1611 112124 : local_pretenuring_feedback_(local_pretenuring_feedback) {}
1612 :
1613 24438559 : inline bool Visit(HeapObject* object, int size) override {
1614 24438559 : HeapObject* target_object = nullptr;
1615 31547490 : if (heap_->ShouldBePromoted(object->address()) &&
1616 7109439 : TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1617 7109022 : promoted_size_ += size;
1618 7109022 : return true;
1619 : }
 : // Not promoted: stays in the young generation (or overflows to old
 : // space); record allocation-site feedback for pretenuring decisions.
1620 : heap_->UpdateAllocationSite(object->map(), object,
1621 34810222 : local_pretenuring_feedback_);
1622 17404992 : HeapObject* target = nullptr;
1623 17404992 : AllocationSpace space = AllocateTargetObject(object, size, &target);
1624 17377795 : MigrateObject(HeapObject::cast(target), object, size, space);
1625 17372658 : semispace_copied_size_ += size;
1626 17372658 : return true;
1627 : }
1628 :
1629 : intptr_t promoted_size() { return promoted_size_; }
1630 : intptr_t semispace_copied_size() { return semispace_copied_size_; }
1631 :
1632 : private:
 : // Allocates the destination, preferring new space and falling back to
 : // old space when the semispace allocation fails; returns the space used.
1633 17409073 : inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
1634 1452 : HeapObject** target_object) {
1635 : AllocationAlignment alignment = old_object->RequiredAlignment();
1636 : AllocationSpace space_allocated_in = NEW_SPACE;
1637 : AllocationResult allocation =
1638 17409073 : local_allocator_->Allocate(NEW_SPACE, size, alignment);
1639 17383335 : if (allocation.IsRetry()) {
1640 1452 : allocation = AllocateInOldSpace(size, alignment);
1641 : space_allocated_in = OLD_SPACE;
1642 : }
1643 : bool ok = allocation.To(target_object);
1644 : DCHECK(ok);
1645 : USE(ok);
1646 17383335 : return space_allocated_in;
1647 : }
1648 :
 : // Old-space fallback; a failure here is a fatal out-of-memory condition.
1649 1452 : inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1650 : AllocationAlignment alignment) {
1651 : AllocationResult allocation =
1652 1452 : local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
1653 1452 : if (allocation.IsRetry()) {
1654 : v8::internal::Heap::FatalProcessOutOfMemory(
1655 0 : "MarkCompactCollector: semi-space copy, fallback in old gen", true);
1656 : }
1657 1452 : return allocation;
1658 : }
1659 :
1660 : LocalAllocationBuffer buffer_;
1661 : intptr_t promoted_size_;
1662 : intptr_t semispace_copied_size_;
1663 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1664 : };
1665 :
 : // Page-level "evacuation": instead of copying objects, moves a whole
 : // new-space page. NEW_TO_NEW keeps the page in the young generation;
 : // NEW_TO_OLD converts it to an old-space page, in which case each live
 : // object is revisited to record its outgoing slots.
1666 : template <PageEvacuationMode mode>
1667 112124 : class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
1668 : public:
1669 : explicit EvacuateNewSpacePageVisitor(
1670 : Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1671 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1672 : : heap_(heap),
1673 : record_visitor_(record_visitor),
1674 : moved_bytes_(0),
1675 112124 : local_pretenuring_feedback_(local_pretenuring_feedback) {}
1676 :
 : // Re-links |page| according to the template mode and tags it so later
 : // phases know it was promoted wholesale.
1677 1572 : static void Move(Page* page) {
1678 : switch (mode) {
1679 : case NEW_TO_NEW:
1680 995 : page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1681 : page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1682 : break;
1683 : case NEW_TO_OLD: {
1684 577 : page->Unlink();
1685 577 : Page* new_page = Page::ConvertNewToOld(page);
1686 : DCHECK(!new_page->InNewSpace());
1687 : new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1688 : break;
1689 : }
1690 : }
1691 1572 : }
1692 :
1693 3779063 : inline bool Visit(HeapObject* object, int size) {
1694 : if (mode == NEW_TO_NEW) {
1695 3779063 : heap_->UpdateAllocationSite(object->map(), object,
1696 7558126 : local_pretenuring_feedback_);
1697 : } else if (mode == NEW_TO_OLD) {
1698 4696916 : object->IterateBodyFast(record_visitor_);
1699 : }
1700 3771857 : return true;
1701 : }
1702 :
1703 : intptr_t moved_bytes() { return moved_bytes_; }
1704 1572 : void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1705 :
1706 : private:
1707 : Heap* heap_;
1708 : RecordMigratedSlotVisitor* record_visitor_;
1709 : intptr_t moved_bytes_;
1710 : Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1711 : };
1712 :
// Visitor that evacuates (copies) live objects off old-generation pages that
// were selected for compaction.
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
 public:
  EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
                          RecordMigratedSlotVisitor* record_visitor)
      : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}

  // Tries to evacuate |object| into the same space its page belongs to.
  // Returns false when evacuation failed (e.g. allocation could not be
  // satisfied), leaving the object in place.
  inline bool Visit(HeapObject* object, int size) override {
    HeapObject* target_object = nullptr;
    if (TryEvacuateObject(
            Page::FromAddress(object->address())->owner()->identity(), object,
            size, &target_object)) {
      // On success the old copy's map word holds the forwarding pointer.
      DCHECK(object->map_word().IsForwardingAddress());
      return true;
    }
    return false;
  }
};
1730 :
// Visitor used on aborted evacuation pages: objects are not moved, only
// their outgoing slots are (re-)recorded in the remembered sets.
class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
 public:
  explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}

  inline bool Visit(HeapObject* object, int size) {
    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
    object->IterateBody(&visitor);
    return true;
  }

 private:
  Heap* heap_;
};
1744 :
1745 337543 : bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1746 337543 : Object* o = *p;
1747 337543 : if (!o->IsHeapObject()) return false;
1748 : HeapObject* heap_object = HeapObject::cast(o);
1749 : return heap_object->GetHeap()
1750 : ->mark_compact_collector()
1751 : ->non_atomic_marking_state()
1752 337543 : ->IsWhite(HeapObject::cast(o));
1753 : }
1754 :
// Marks the string table object itself black and traces its prefix section
// through |custom_root_body_visitor|. Only the prefix is iterated here; the
// string entries are not visited by this function.
void MarkCompactCollector::MarkStringTable(
    ObjectVisitor* custom_root_body_visitor) {
  StringTable* string_table = heap()->string_table();
  // Mark the string table itself.
  if (atomic_marking_state()->WhiteToBlack(string_table)) {
    // Explicitly mark the prefix.
    string_table->IteratePrefix(custom_root_body_visitor);
  }
}
1764 :
// Marks the strong root set: regular strong roots via |root_visitor| plus
// the two roots that need custom body treatment (string table and the
// topmost optimized frame).
void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
                                     ObjectVisitor* custom_root_body_visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);

  // Custom marking for string table and top optimized frame.
  MarkStringTable(custom_root_body_visitor);
  ProcessTopOptimizedFrame(custom_root_body_visitor);
}
1775 :
// Drains the marking worklist to a fixed point: each popped grey object is
// turned black and its body visited, which may push newly discovered
// objects back onto the worklist.
void MarkCompactCollector::ProcessMarkingWorklist() {
  HeapObject* object;
  MarkCompactMarkingVisitor visitor(this, atomic_marking_state());
  while ((object = marking_worklist()->Pop()) != nullptr) {
    DCHECK(!object->IsFiller());
    DCHECK(object->IsHeapObject());
    DCHECK(heap()->Contains(object));
    DCHECK(!(atomic_marking_state()->IsWhite(object)));
    atomic_marking_state()->GreyToBlack(object);
    Map* map = object->map();
    // Mark the object's map before visiting the body.
    MarkObject(object, map);
    visitor.Visit(map, object);
  }
  DCHECK(marking_worklist()->IsBailoutEmpty());
}
1791 :
// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
// Iterates to a fixed point: embedder tracing and weak-collection
// processing can each discover new work for the marking worklist.
void MarkCompactCollector::ProcessEphemeralMarking(
    bool only_process_harmony_weak_collections) {
  DCHECK(marking_worklist()->IsEmpty());
  bool work_to_do = true;
  while (work_to_do) {
    if (!only_process_harmony_weak_collections) {
      // Let the embedder (e.g. Blink) trace its wrappers to completion.
      if (heap_->local_embedder_heap_tracer()->InUse()) {
        TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
        heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
        heap_->local_embedder_heap_tracer()->Trace(
            0,
            EmbedderHeapTracer::AdvanceTracingActions(
                EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
      }
    } else {
      // TODO(mlippautz): We currently do not trace through blink when
      // discovering new objects reachable from weak roots (that have been made
      // strong). This is a limitation of not having a separate handle type
      // that doesn't require zapping before this phase. See crbug.com/668060.
      heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
    }
    ProcessWeakCollections();
    // Another round is needed iff weak-collection processing produced work.
    work_to_do = !marking_worklist()->IsEmpty();
    ProcessMarkingWorklist();
  }
  CHECK(marking_worklist()->IsEmpty());
  CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
}
1822 :
// Walks the stack from the top looking at the first interpreted or optimized
// frame. If the topmost relevant frame is optimized and its code cannot
// deoptimize at the current pc, the code object's body is treated as a
// strong root (its embedded pointers must stay alive).
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
       !it.done(); it.Advance()) {
    if (it.frame()->type() == StackFrame::INTERPRETED) {
      // An interpreted frame on top means there is nothing to pin here.
      return;
    }
    if (it.frame()->type() == StackFrame::OPTIMIZED) {
      Code* code = it.frame()->LookupCode();
      if (!code->CanDeoptAt(it.frame()->pc())) {
        Code::BodyDescriptor::IterateBody(code, visitor);
      }
      // Only the topmost optimized frame is considered.
      return;
    }
  }
}
1838 :
// Visitor that classifies every heap object as live (marked black by the
// full collector) or dead and feeds it into the corresponding statistics
// collector. Used only when --gc-stats is enabled.
class ObjectStatsVisitor : public HeapObjectVisitor {
 public:
  ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
                     ObjectStats* dead_stats)
      : live_collector_(heap, live_stats),
        dead_collector_(heap, dead_stats),
        marking_state_(
            heap->mark_compact_collector()->non_atomic_marking_state()) {
    DCHECK_NOT_NULL(live_stats);
    DCHECK_NOT_NULL(dead_stats);
    // Global objects are roots and thus recorded as live.
    live_collector_.CollectGlobalStatistics();
  }

  bool Visit(HeapObject* obj, int size) override {
    if (marking_state_->IsBlack(obj)) {
      live_collector_.CollectStatistics(obj);
    } else {
      // At this point marking is complete, so no object may still be grey.
      DCHECK(!marking_state_->IsGrey(obj));
      dead_collector_.CollectStatistics(obj);
    }
    return true;
  }

 private:
  ObjectStatsCollector live_collector_;
  ObjectStatsCollector dead_collector_;
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
1868 :
// Applies |visitor| to every object in every heap space.
void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
  SpaceIterator space_it(heap());
  HeapObject* obj = nullptr;
  while (space_it.has_next()) {
    // The iterator is heap-allocated by the space; own it for this scope.
    std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
    ObjectIterator* obj_it = it.get();
    while ((obj = obj_it->Next()) != nullptr) {
      visitor->Visit(obj, obj->Size());
    }
  }
}
1880 :
// When --gc-stats is enabled, collects live/dead object statistics over the
// whole heap and reports them to tracing and/or stdout depending on flags,
// then checkpoints/clears the counters for the next cycle.
void MarkCompactCollector::RecordObjectStats() {
  if (V8_UNLIKELY(FLAG_gc_stats)) {
    heap()->CreateObjectStats();
    ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
                               heap()->dead_object_stats_);
    VisitAllObjects(&visitor);
    // Note: FLAG_gc_stats is a bitmask here; the bitwise AND tests whether
    // stats were enabled specifically via the tracing category.
    if (V8_UNLIKELY(FLAG_gc_stats &
                    v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
      std::stringstream live, dead;
      heap()->live_object_stats_->Dump(live);
      heap()->dead_object_stats_->Dump(dead);
      TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
                           "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
                           "live", TRACE_STR_COPY(live.str().c_str()), "dead",
                           TRACE_STR_COPY(dead.str().c_str()));
    }
    if (FLAG_trace_gc_object_stats) {
      heap()->live_object_stats_->PrintJSON("live");
      heap()->dead_object_stats_->PrintJSON("dead");
    }
    heap()->live_object_stats_->CheckpointObjectStats();
    heap()->dead_object_stats_->ClearObjectStats();
  }
}
1905 :
// Marking visitor for the minor (young-generation) collector. Only pointers
// into new space are followed; discovered white objects are turned grey and
// pushed onto this task's view of the shared marking worklist.
class YoungGenerationMarkingVisitor final
    : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
 public:
  YoungGenerationMarkingVisitor(
      Heap* heap, MinorMarkCompactCollector::MarkingState* marking_state,
      MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
      : heap_(heap),
        worklist_(global_worklist, task_id),
        marking_state_(marking_state) {}

  V8_INLINE void VisitPointers(HeapObject* host, Object** start,
                               Object** end) final {
    for (Object** p = start; p < end; p++) {
      VisitPointer(host, p);
    }
  }

  V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
    Object* target = *slot;
    // Old-generation targets are ignored by the minor collector.
    if (heap_->InNewSpace(target)) {
      HeapObject* target_object = HeapObject::cast(target);
      MarkObjectViaMarkingWorklist(target_object);
    }
  }

 private:
  inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
    if (marking_state_->WhiteToGrey(object)) {
      // Marking deque overflow is unsupported for the young generation.
      CHECK(worklist_.Push(object));
    }
  }

  Heap* heap_;
  MinorMarkCompactCollector::MarkingWorklist::View worklist_;
  MinorMarkCompactCollector::MarkingState* marking_state_;
};
1943 :
// Root visitor for the minor collector's main thread: marks new-space roots
// grey and immediately processes them to transitive closure (each root is
// visited and the worklist drained before moving on).
class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
 public:
  explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
      : collector_(collector),
        marking_state_(collector_->non_atomic_marking_state()) {}

  void VisitRootPointer(Root root, Object** p) override {
    MarkObjectByPointer(p);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;  // Smis need no marking.

    HeapObject* object = HeapObject::cast(*p);

    // The minor collector only cares about the young generation.
    if (!collector_->heap()->InNewSpace(object)) return;

    if (marking_state_->WhiteToGrey(object)) {
      collector_->main_marking_visitor()->Visit(object);
      collector_->ProcessMarkingWorklist();
    }
  }

  MinorMarkCompactCollector* collector_;
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
1975 :
1976 : class MarkingItem;
1977 : class GlobalHandlesMarkingItem;
1978 : class PageMarkingItem;
1979 : class RootMarkingItem;
1980 : class YoungGenerationMarkingTask;
1981 :
// Base class for units of parallel young-generation marking work; each item
// is processed by exactly one YoungGenerationMarkingTask.
class MarkingItem : public ItemParallelJob::Item {
 public:
  virtual ~MarkingItem() {}
  virtual void Process(YoungGenerationMarkingTask* task) = 0;
};
1987 :
1988 0 : class YoungGenerationMarkingTask : public ItemParallelJob::Task {
1989 : public:
1990 0 : YoungGenerationMarkingTask(
1991 : Isolate* isolate, MinorMarkCompactCollector* collector,
1992 : MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
1993 : : ItemParallelJob::Task(isolate),
1994 : collector_(collector),
1995 : marking_worklist_(global_worklist, task_id),
1996 0 : marking_state_(collector->marking_state()),
1997 0 : visitor_(isolate->heap(), marking_state_, global_worklist, task_id) {
1998 0 : local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
1999 0 : Page::kPageSize);
2000 0 : }
2001 :
2002 0 : void RunInParallel() override {
2003 0 : double marking_time = 0.0;
2004 : {
2005 : TimedScope scope(&marking_time);
2006 : MarkingItem* item = nullptr;
2007 0 : while ((item = GetItem<MarkingItem>()) != nullptr) {
2008 0 : item->Process(this);
2009 0 : item->MarkFinished();
2010 0 : EmptyLocalMarkingWorklist();
2011 : }
2012 0 : EmptyMarkingWorklist();
2013 : DCHECK(marking_worklist_.IsLocalEmpty());
2014 0 : FlushLiveBytes();
2015 : }
2016 0 : if (FLAG_trace_minor_mc_parallel_marking) {
2017 0 : PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
2018 0 : static_cast<void*>(this), marking_time);
2019 : }
2020 0 : };
2021 :
2022 0 : void MarkObject(Object* object) {
2023 0 : if (!collector_->heap()->InNewSpace(object)) return;
2024 : HeapObject* heap_object = HeapObject::cast(object);
2025 0 : if (marking_state_->WhiteToGrey(heap_object)) {
2026 : const int size = visitor_.Visit(heap_object);
2027 0 : IncrementLiveBytes(heap_object, size);
2028 : }
2029 : }
2030 :
2031 : private:
2032 0 : void EmptyLocalMarkingWorklist() {
2033 0 : HeapObject* object = nullptr;
2034 0 : while (marking_worklist_.Pop(&object)) {
2035 0 : const int size = visitor_.Visit(object);
2036 0 : IncrementLiveBytes(object, size);
2037 : }
2038 0 : }
2039 :
2040 0 : void EmptyMarkingWorklist() {
2041 0 : HeapObject* object = nullptr;
2042 0 : while (marking_worklist_.Pop(&object)) {
2043 0 : const int size = visitor_.Visit(object);
2044 0 : IncrementLiveBytes(object, size);
2045 : }
2046 0 : }
2047 :
2048 : void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
2049 0 : local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
2050 0 : bytes;
2051 : }
2052 :
2053 0 : void FlushLiveBytes() {
2054 0 : for (auto pair : local_live_bytes_) {
2055 : marking_state_->IncrementLiveBytes(pair.first, pair.second);
2056 : }
2057 0 : }
2058 :
2059 : MinorMarkCompactCollector* collector_;
2060 : MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
2061 : MinorMarkCompactCollector::MarkingState* marking_state_;
2062 : YoungGenerationMarkingVisitor visitor_;
2063 : std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
2064 : };
2065 :
2066 : class BatchedRootMarkingItem : public MarkingItem {
2067 : public:
2068 : explicit BatchedRootMarkingItem(std::vector<Object*>&& objects)
2069 0 : : objects_(objects) {}
2070 0 : virtual ~BatchedRootMarkingItem() {}
2071 :
2072 0 : void Process(YoungGenerationMarkingTask* task) override {
2073 0 : for (Object* object : objects_) {
2074 0 : task->MarkObject(object);
2075 : }
2076 0 : }
2077 :
2078 : private:
2079 : std::vector<Object*> objects_;
2080 : };
2081 :
// Work item that processes the OLD_TO_NEW remembered set of one memory
// chunk: every recorded slot still pointing into new space is marked through
// the task and kept; stale slots are removed. The number of surviving slots
// is published to a shared counter on destruction.
class PageMarkingItem : public MarkingItem {
 public:
  explicit PageMarkingItem(MemoryChunk* chunk,
                           base::AtomicNumber<intptr_t>* global_slots)
      : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
  // Publish this item's surviving-slot count to the global total.
  virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }

  void Process(YoungGenerationMarkingTask* task) override {
    // Serialize access to this chunk's remembered-set data structures.
    base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
    MarkUntypedPointers(task);
    MarkTypedPointers(task);
  }

 private:
  inline Heap* heap() { return chunk_->heap(); }

  void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
    RememberedSet<OLD_TO_NEW>::Iterate(
        chunk_,
        [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
        SlotSet::PREFREE_EMPTY_BUCKETS);
  }

  void MarkTypedPointers(YoungGenerationMarkingTask* task) {
    Isolate* isolate = heap()->isolate();
    RememberedSet<OLD_TO_NEW>::IterateTyped(
        chunk_, [this, isolate, task](SlotType slot_type, Address host_addr,
                                      Address slot) {
          // Typed slots are decoded to an untyped slot address first.
          return UpdateTypedSlotHelper::UpdateTypedSlot(
              isolate, slot_type, slot, [this, task](Object** slot) {
                return CheckAndMarkObject(task,
                                          reinterpret_cast<Address>(slot));
              });
        });
  }

  // Marks the slot's target when it lies in new space. Returns KEEP_SLOT for
  // new-space targets (slot stays in the remembered set), REMOVE_SLOT
  // otherwise.
  SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
                                        Address slot_address) {
    Object* object = *reinterpret_cast<Object**>(slot_address);
    if (heap()->InNewSpace(object)) {
      // Marking happens before flipping the young generation, so the object
      // has to be in ToSpace.
      DCHECK(heap()->InToSpace(object));
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      task->MarkObject(heap_object);
      slots_++;
      return KEEP_SLOT;
    }
    return REMOVE_SLOT;
  }

  MemoryChunk* chunk_;
  base::AtomicNumber<intptr_t>* global_slots_;
  intptr_t slots_;
};
2137 :
// Work item covering a [start, end) range of new-space global handle nodes.
// Each node's strong/dependent roots are marked through the owning task.
class GlobalHandlesMarkingItem : public MarkingItem {
 public:
  GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
                           size_t end)
      : global_handles_(global_handles), start_(start), end_(end) {}
  virtual ~GlobalHandlesMarkingItem() {}

  void Process(YoungGenerationMarkingTask* task) override {
    GlobalHandlesRootMarkingVisitor visitor(task);
    global_handles_
        ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
            &visitor, start_, end_);
  }

 private:
  // Adapter that forwards each visited root pointer to the marking task.
  class GlobalHandlesRootMarkingVisitor : public RootVisitor {
   public:
    explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
        : task_(task) {}

    void VisitRootPointer(Root root, Object** p) override {
      DCHECK_EQ(Root::kGlobalHandles, root);
      task_->MarkObject(*p);
    }

    void VisitRootPointers(Root root, Object** start, Object** end) override {
      DCHECK_EQ(Root::kGlobalHandles, root);
      for (Object** p = start; p < end; p++) {
        task_->MarkObject(*p);
      }
    }

   private:
    YoungGenerationMarkingTask* task_;
  };

  GlobalHandles* global_handles_;
  size_t start_;
  size_t end_;
};
2178 :
// Sets up the minor collector's shared marking worklist and the main-thread
// marking visitor (registered under kMainMarker's task id).
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
    : MarkCompactCollectorBase(heap),
      worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
      main_marking_visitor_(new YoungGenerationMarkingVisitor(
          heap, marking_state(), worklist_, kMainMarker)),
      page_parallel_job_semaphore_(0) {
  static_assert(
      kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
      "more marker tasks than marking deque can handle");
}
2189 :
// Releases the heap-allocated worklist and main marking visitor owned by
// this collector.
MinorMarkCompactCollector::~MinorMarkCompactCollector() {
  delete worklist_;
  delete main_marking_visitor_;
}
2194 :
// Returns true iff *p is a new-space object that the minor collector did not
// mark (not even grey), i.e. it is dead from the minor marker's perspective.
static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
  DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
  return heap->InNewSpace(*p) && !heap->minor_mark_compact_collector()
                                      ->non_atomic_marking_state()
                                      ->IsGrey(HeapObject::cast(*p));
}
2201 :
2202 : template <class ParallelItem>
2203 0 : static void SeedGlobalHandles(GlobalHandles* global_handles,
2204 : ItemParallelJob* job) {
2205 : // Create batches of global handles.
2206 : const size_t kGlobalHandlesBufferSize = 1000;
2207 : const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
2208 0 : for (size_t start = 0; start < new_space_nodes;
2209 : start += kGlobalHandlesBufferSize) {
2210 0 : size_t end = start + kGlobalHandlesBufferSize;
2211 0 : if (end > new_space_nodes) end = new_space_nodes;
2212 0 : job->AddItem(new ParallelItem(global_handles, start, end));
2213 : }
2214 0 : }
2215 :
// Marks the minor collector's root set in parallel: seeds an ItemParallelJob
// with batched roots, global-handle ranges, and per-chunk remembered-set
// items, then runs marking tasks over them. The surviving old-to-new slot
// count is accumulated into |old_to_new_slots_|.
void MinorMarkCompactCollector::MarkRootSetInParallel() {
  base::AtomicNumber<intptr_t> slots;
  {
    ItemParallelJob job(isolate()->cancelable_task_manager(),
                        &page_parallel_job_semaphore_);

    // Seed the root set (roots + old->new set).
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
      // Create batches of roots.
      RootMarkingVisitorSeedOnly<BatchedRootMarkingItem> root_seed_visitor(
          &job);
      heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
      // Create batches of global handles.
      SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
                                                 &job);
      // Create items for each page.
      RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
          heap(), [&job, &slots](MemoryChunk* chunk) {
            job.AddItem(new PageMarkingItem(chunk, &slots));
          });
      // Flush any remaining objects in the seeding visitor.
      root_seed_visitor.FlushObjects();
    }

    // Add tasks and run in parallel.
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
      const int new_space_pages =
          static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
      const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
      for (int i = 0; i < num_tasks; i++) {
        job.AddTask(
            new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
      }
      job.Run();
      DCHECK(worklist()->IsGlobalEmpty());
    }
  }
  // The PageMarkingItem destructors (run when |job| is torn down) have
  // published their per-item counts into |slots| by this point.
  old_to_new_slots_ = static_cast<int>(slots.Value());
}
2257 :
// Minor-GC marking phase: marks the root set in parallel, then finishes the
// remaining (weak-related) work on the main thread.
void MinorMarkCompactCollector::MarkLiveObjects() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);

  PostponeInterruptsScope postpone(isolate());

  RootMarkingVisitor root_visitor(this);

  MarkRootSetInParallel();

  // Mark rest on the main thread.
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
    heap()->IterateEncounteredWeakCollections(&root_visitor);
    ProcessMarkingWorklist();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
    // Identify dead new-space handles, then mark objects newly reachable
    // from the weak-but-unmodified roots.
    isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
        &IsUnmarkedObjectForYoungGeneration);
    isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
        &root_visitor);
    ProcessMarkingWorklist();
  }
}
2283 :
// Drains the main-thread view of the minor marking worklist, visiting each
// grey object; visiting may push more objects until a fixed point is hit.
void MinorMarkCompactCollector::ProcessMarkingWorklist() {
  MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
  HeapObject* object = nullptr;
  while (marking_worklist.Pop(&object)) {
    DCHECK(!object->IsFiller());
    DCHECK(object->IsHeapObject());
    DCHECK(heap()->Contains(object));
    DCHECK(non_atomic_marking_state()->IsGrey(object));
    main_marking_visitor()->Visit(object);
  }
  DCHECK(marking_worklist.IsLocalEmpty());
}
2296 :
// Top-level driver for a minor (young-generation) mark-compact cycle:
// finish pending sweeping, mark, clear weak refs, evacuate, then reset
// liveness information of the (now empty) from-space pages.
void MinorMarkCompactCollector::CollectGarbage() {
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
    // New-space sweeping from the previous full GC must finish before the
    // young generation can be marked.
    heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
    CleanupSweepToIteratePages();
  }

  MarkLiveObjects();
  ClearNonLiveReferences();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    YoungGenerationMarkingVerifier verifier(heap());
    verifier.Run();
  }
#endif  // VERIFY_HEAP

  Evacuate();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    YoungGenerationEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif  // VERIFY_HEAP

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
    heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
    // From-space pages were fully evacuated; drop their mark bitmaps and
    // live-byte counts.
    for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
                             heap()->new_space()->FromSpaceEnd())) {
      DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
      non_atomic_marking_state()->ClearLiveness(p);
      if (FLAG_concurrent_marking) {
        // Ensure that concurrent marker does not track pages that are
        // going to be unmapped.
        heap()->concurrent_marking()->ClearLiveness(p);
      }
    }
  }

  heap()->account_external_memory_concurrently_freed();
}
2342 :
// Makes page |p| linearly iterable after a minor GC by turning the gaps
// between surviving (grey) objects into filler objects, optionally zapping
// the freed memory. The full collector's mark bits covering those gaps are
// cleared so the two collectors' bitmaps stay consistent.
void MinorMarkCompactCollector::MakeIterable(
    Page* p, MarkingTreatmentMode marking_mode,
    FreeSpaceTreatmentMode free_space_mode) {
  // We have to clear the full collectors markbits for the areas that we
  // remove here.
  MarkCompactCollector* full_collector = heap()->mark_compact_collector();
  Address free_start = p->area_start();
  DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));

  for (auto object_and_size :
       LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(non_atomic_marking_state()->IsGrey(object));
    Address free_end = object->address();
    if (free_end != free_start) {
      // [free_start, free_end) is a dead gap: clear its full-GC mark bits
      // and cover it with a filler so the page can be walked linearly.
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
          p->AddressToMarkbitIndex(free_start),
          p->AddressToMarkbitIndex(free_end));
      if (free_space_mode == ZAP_FREE_SPACE) {
        memset(free_start, 0xcc, size);
      }
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo);
    }
    Map* map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    free_start = free_end + size;
  }

  // Handle the trailing gap after the last live object, if any.
  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
        p->AddressToMarkbitIndex(free_start),
        p->AddressToMarkbitIndex(p->area_end()));
    if (free_space_mode == ZAP_FREE_SPACE) {
      memset(free_start, 0xcc, size);
    }
    p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                    ClearRecordedSlots::kNo);
  }

  if (marking_mode == MarkingTreatmentMode::CLEAR) {
    non_atomic_marking_state()->ClearLiveness(p);
    p->ClearFlag(Page::SWEEP_TO_ITERATE);
  }
}
2392 :
// Clears references to objects the minor marker found dead: new-space
// external strings and the young weak reference lists.
void MinorMarkCompactCollector::ClearNonLiveReferences() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
    // Internalized strings are always stored in old space, so there is no need
    // to clean them here.
    YoungGenerationExternalStringTableCleaner external_visitor(this);
    heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
    heap()->external_string_table_.CleanUpNewSpaceStrings();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
    // Process the weak references.
    MinorMarkCompactWeakObjectRetainer retainer(this);
    heap()->ProcessYoungWeakReferences(&retainer);
  }
}
2412 :
// Prepares new space for evacuation: remembers the currently used pages,
// flips the semispaces, and resets allocation to the (new) to-space.
void MinorMarkCompactCollector::EvacuatePrologue() {
  NewSpace* new_space = heap()->new_space();
  // Append the list of new space pages to be processed.
  for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
    new_space_evacuation_pages_.push_back(p);
  }
  new_space->Flip();
  new_space->ResetAllocationInfo();
}
2422 :
// Finishes evacuation: records the survival boundary (age mark) and returns
// queued-to-be-freed memory chunks to the OS.
void MinorMarkCompactCollector::EvacuateEpilogue() {
  heap()->new_space()->set_age_mark(heap()->new_space()->top());
  // Give pages that are queued to be freed back to the OS.
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
2428 :
// Evacuation phase of the minor GC: copy/move surviving new-space objects,
// fix up pointers, rebalance new space, and tag wholesale-moved pages for
// later sweep-to-iterate handling.
void MinorMarkCompactCollector::Evacuate() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
  // Serializes against other operations that relocate objects.
  base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
    EvacuatePrologue();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
    EvacuatePagesInParallel();
  }

  UpdatePointersAfterEvacuation();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
    if (!heap()->new_space()->Rebalance()) {
      FatalProcessOutOfMemory("NewSpace::Rebalance");
    }
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
    // Pages that were promoted/moved as a whole still contain live objects
    // interleaved with garbage; mark them SWEEP_TO_ITERATE so iteration
    // remains possible until they are made iterable.
    for (Page* p : new_space_evacuation_pages_) {
      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
          p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
        p->SetFlag(Page::SWEEP_TO_ITERATE);
        sweep_to_iterate_pages_.push_back(p);
      }
    }
    new_space_evacuation_pages_.clear();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
    EvacuateEpilogue();
  }
}
2471 :
     : // Major-GC marking phase: finalize (or verify stopped) incremental
     : // marking, mark all strong roots, drain the marking worklist (merging
     : // concurrent-marking results), then compute the weak closure over
     : // ephemerons, weak global handles, weak roots, and embedder wrappers.
2472 56800 : void MarkCompactCollector::MarkLiveObjects() {
2473 924342 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
2474 : // The recursive GC marker detects when it is nearing stack overflow,
2475 : // and switches to a different marking system. JS interrupts interfere
2476 : // with the C stack limit check.
2477 : PostponeInterruptsScope postpone(isolate());
2478 :
2479 : {
2480 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2481 169756 : IncrementalMarking* incremental_marking = heap_->incremental_marking();
2482 56800 : if (was_marked_incrementally_) {
2483 15542 : incremental_marking->Finalize();
2484 : } else {
2485 41258 : CHECK(incremental_marking->IsStopped());
2486 56800 : }
2487 : }
2488 :
2489 : #ifdef DEBUG
2490 : DCHECK(state_ == PREPARE_GC);
2491 : state_ = MARK_LIVE_OBJECTS;
2492 : #endif
2493 :
2494 113600 : heap_->local_embedder_heap_tracer()->EnterFinalPause();
2495 :
2496 : RootMarkingVisitor root_visitor(this);
2497 :
2498 : {
2499 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2500 : CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
2501 113600 : MarkRoots(&root_visitor, &custom_root_body_visitor);
2502 : }
2503 :
2504 : {
2505 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
2506 56800 : if (FLAG_concurrent_marking) {
2507 112312 : heap_->concurrent_marking()->RescheduleTasksIfNeeded();
2508 : }
     : // Drain twice: once alongside concurrent markers, and again after
     : // FinishConcurrentMarking has flushed their remaining work into the
     : // main-thread worklist.
2509 56800 : ProcessMarkingWorklist();
2510 :
2511 56800 : FinishConcurrentMarking();
2512 113600 : ProcessMarkingWorklist();
2513 : }
2514 :
2515 : {
2516 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2517 :
2518 : DCHECK(marking_worklist()->IsEmpty());
2519 :
2520 : // The objects reachable from the roots are marked, yet unreachable
2521 : // objects are unmarked. Mark objects reachable due to host
2522 : // application specific logic or through Harmony weak maps.
2523 : {
2524 227200 : TRACE_GC(heap()->tracer(),
2525 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
2526 113600 : ProcessEphemeralMarking(false);
2527 : }
2528 :
2529 : // The objects reachable from the roots, weak maps or object groups
2530 : // are marked. Objects pointed to only by weak global handles cannot be
2531 : // immediately reclaimed. Instead, we have to mark them as pending and mark
2532 : // objects reachable from them.
2533 : //
2534 : // First we identify nonlive weak handles and mark them as pending
2535 : // destruction.
2536 : {
2537 227200 : TRACE_GC(heap()->tracer(),
2538 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2539 : heap()->isolate()->global_handles()->IdentifyWeakHandles(
2540 56800 : &IsUnmarkedHeapObject);
2541 113600 : ProcessMarkingWorklist();
2542 : }
2543 : // Then we mark the objects.
2544 :
2545 : {
2546 227200 : TRACE_GC(heap()->tracer(),
2547 : GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2548 56800 : heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2549 113600 : ProcessMarkingWorklist();
2550 : }
2551 :
2552 : // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2553 : // the weak roots we just marked as pending destruction.
2554 : //
2555 : // We only process harmony collections, as all object groups have been fully
2556 : // processed and no weakly reachable node can discover new object groups.
2557 : {
2558 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2559 56800 : ProcessEphemeralMarking(true);
2560 : {
2561 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
2562 113600 : heap()->local_embedder_heap_tracer()->TraceEpilogue();
2563 56800 : }
2564 56800 : }
2565 : }
2566 :
2567 56800 : if (was_marked_incrementally_) {
2568 15542 : heap()->incremental_marking()->Deactivate();
2569 56800 : }
2570 56800 : }
2571 :
2572 :
     : // After marking: drops references to unmarked (dead) objects — prunes
     : // the string tables, processes weak lists, clears map transitions and
     : // weak cells, and collects dependent code for deoptimization.
2573 56800 : void MarkCompactCollector::ClearNonLiveReferences() {
2574 624800 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2575 :
2576 : {
2577 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2578 :
2579 : // Prune the string table removing all strings only pointed to by the
2580 : // string table. Cannot use string_table() here because the string
2581 : // table is marked.
2582 56800 : StringTable* string_table = heap()->string_table();
2583 : InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
2584 56800 : string_table->IterateElements(&internalized_visitor);
2585 56800 : string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2586 :
2587 : ExternalStringTableCleaner external_visitor(heap());
2588 56800 : heap()->external_string_table_.IterateAll(&external_visitor);
2589 113600 : heap()->external_string_table_.CleanUpAll();
2590 : }
2591 :
2592 : {
2593 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2594 : // Process the weak references.
2595 : MarkCompactWeakObjectRetainer mark_compact_object_retainer(
2596 56800 : non_atomic_marking_state());
2597 113600 : heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2598 : }
2599 :
2600 : {
2601 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2602 : // ClearFullMapTransitions must be called before WeakCells are cleared.
2603 113600 : ClearFullMapTransitions();
2604 : }
2605 : DependentCode* dependent_code_list;
2606 56800 : ClearWeakCellsAndSimpleMapTransitions(&dependent_code_list);
2607 56800 : MarkDependentCodeForDeoptimization(dependent_code_list);
2608 :
2609 56800 : ClearWeakCollections();
2610 :
     : // Both worklists must be fully drained by the calls above.
2611 : DCHECK(weak_objects_.weak_cells.IsGlobalEmpty());
2612 56800 : DCHECK(weak_objects_.transition_arrays.IsGlobalEmpty());
2613 56800 : }
2614 :
2615 :
     : // Walks the list of DependentCode gathered while clearing weak cells and
     : // marks the affected code for deoptimization. Also scans the
     : // new-space object-to-code list and the weak_object_to_code_table for
     : // entries whose weak cells were cleared during this GC.
2616 56800 : void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2617 : DependentCode* list_head) {
2618 227200 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
2619 : Isolate* isolate = this->isolate();
2620 : DependentCode* current = list_head;
2621 128116 : while (current->length() > 0) {
2622 : have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2623 14516 : isolate, DependentCode::kWeakCodeGroup);
2624 : current = current->next_link();
2625 : }
2626 :
2627 : {
     : // The list stores (object, code) weak-cell pairs at even/odd indices;
     : // live pairs are compacted to the front via |counter|.
2628 124724 : ArrayList* list = heap_->weak_new_space_object_to_code_list();
2629 : int counter = 0;
2630 488932 : for (int i = 0; i < list->Length(); i += 2) {
2631 : WeakCell* obj = WeakCell::cast(list->Get(i));
2632 : WeakCell* dep = WeakCell::cast(list->Get(i + 1));
2633 641420 : if (obj->cleared() || dep->cleared()) {
2634 131190 : if (!dep->cleared()) {
2635 : Code* code = Code::cast(dep->value());
2636 121 : if (!code->marked_for_deoptimization()) {
2637 : DependentCode::SetMarkedForDeoptimization(
2638 15 : code, DependentCode::DependencyGroup::kWeakCodeGroup);
2639 15 : code->InvalidateEmbeddedObjects();
2640 15 : have_code_to_deoptimize_ = true;
2641 : }
2642 : }
2643 : } else {
2644 : // We record the slot manually because marking is finished at this
2645 : // point and the write barrier would bailout.
2646 : list->Set(counter, obj, SKIP_WRITE_BARRIER);
2647 : RecordSlot(list, list->Slot(counter), obj);
2648 : counter++;
2649 : list->Set(counter, dep, SKIP_WRITE_BARRIER);
2650 : RecordSlot(list, list->Slot(counter), dep);
2651 : counter++;
2652 : }
2653 : }
2654 : }
2655 :
2656 56800 : WeakHashTable* table = heap_->weak_object_to_code_table();
2657 56800 : uint32_t capacity = table->Capacity();
2658 1923616 : for (uint32_t i = 0; i < capacity; i++) {
2659 1866816 : uint32_t key_index = table->EntryToIndex(i);
2660 : Object* key = table->get(key_index);
2661 1866816 : if (!table->IsKey(isolate, key)) continue;
2662 : uint32_t value_index = table->EntryToValueIndex(i);
2663 : Object* value = table->get(value_index);
2664 : DCHECK(key->IsWeakCell());
2665 89054 : if (WeakCell::cast(key)->cleared()) {
2666 : have_code_to_deoptimize_ |=
2667 : DependentCode::cast(value)->MarkCodeForDeoptimization(
2668 5562 : isolate, DependentCode::kWeakCodeGroup);
2669 11124 : table->set(key_index, heap_->the_hole_value());
2670 11124 : table->set(value_index, heap_->the_hole_value());
2671 5562 : table->ElementRemoved();
2672 : }
2673 56800 : }
2674 56800 : }
2675 :
     : // If |dead_target|'s (live) parent map still holds a simple transition
     : // through |potential_transition|, clear that transition on the parent.
2676 344313 : void MarkCompactCollector::ClearSimpleMapTransition(
2677 : WeakCell* potential_transition, Map* dead_target) {
2678 : DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
2679 : Object* potential_parent = dead_target->constructor_or_backpointer();
2680 344313 : if (potential_parent->IsMap()) {
2681 : Map* parent = Map::cast(potential_parent);
2682 : DisallowHeapAllocation no_gc_obviously;
2683 663372 : if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
2684 : TransitionsAccessor(parent, &no_gc_obviously)
2685 424656 : .HasSimpleTransitionTo(potential_transition)) {
2686 29448 : ClearSimpleMapTransition(parent, dead_target);
2687 : }
2688 : }
2689 344313 : }
2690 :
     : // Drops |map|'s simple transition to the dead |dead_target| and, if the
     : // two shared a descriptor array, trims it and transfers ownership to |map|.
2691 29448 : void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2692 : Map* dead_target) {
2693 : DCHECK(!map->is_prototype_map());
2694 : DCHECK(!dead_target->is_prototype_map());
2695 : // Clear the useless weak cell pointer, and take ownership of the descriptor
2696 : // array.
2697 29448 : map->set_raw_transitions(Smi::kZero);
2698 : int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2699 : DescriptorArray* descriptors = map->instance_descriptors();
2700 29448 : if (descriptors == dead_target->instance_descriptors() &&
2701 : number_of_own_descriptors > 0) {
2702 513 : TrimDescriptorArray(map, descriptors);
2703 : DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2704 : map->set_owns_descriptors(true);
2705 : }
2706 29448 : }
2707 :
     : // Drains the transition-array worklist: compacts each array by removing
     : // entries whose target maps died, and trims the parent's descriptor
     : // array when its owner was among the dead targets.
2708 56800 : void MarkCompactCollector::ClearFullMapTransitions() {
2709 : TransitionArray* array;
2710 795769 : while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
2711 682169 : int num_transitions = array->number_of_entries();
2712 682169 : if (num_transitions > 0) {
     : // All targets share one parent, so entry 0 suffices to find it.
2713 518363 : Map* map = array->GetTarget(0);
2714 : DCHECK_NOT_NULL(map); // WeakCells aren't cleared yet.
2715 : Map* parent = Map::cast(map->constructor_or_backpointer());
2716 : bool parent_is_alive = non_atomic_marking_state()->IsBlackOrGrey(parent);
2717 : DescriptorArray* descriptors =
2718 518363 : parent_is_alive ? parent->instance_descriptors() : nullptr;
2719 : bool descriptors_owner_died =
2720 518363 : CompactTransitionArray(parent, array, descriptors);
2721 518363 : if (descriptors_owner_died) {
2722 2680 : TrimDescriptorArray(parent, descriptors);
2723 : }
2724 : }
2725 : }
2726 56800 : }
2727 :
     : // Compacts live transitions of |map| to the front of |transitions| and
     : // right-trims the tail. Returns true when a dead target owned
     : // |descriptors| (caller must then trim the descriptor array).
2728 518363 : bool MarkCompactCollector::CompactTransitionArray(
2729 : Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2730 : DCHECK(!map->is_prototype_map());
2731 : int num_transitions = transitions->number_of_entries();
2732 : bool descriptors_owner_died = false;
2733 : int transition_index = 0;
2734 : // Compact all live transitions to the left.
2735 1221960 : for (int i = 0; i < num_transitions; ++i) {
2736 : Map* target = transitions->GetTarget(i);
2737 : DCHECK_EQ(target->constructor_or_backpointer(), map);
2738 703597 : if (non_atomic_marking_state()->IsWhite(target)) {
2739 144138 : if (descriptors != nullptr &&
2740 : target->instance_descriptors() == descriptors) {
2741 : DCHECK(!target->is_prototype_map());
2742 : descriptors_owner_died = true;
2743 : }
2744 : } else {
2745 631528 : if (i != transition_index) {
     : // Slots are re-recorded manually: marking is over, so the normal
     : // write barrier would not capture these moves.
2746 : Name* key = transitions->GetKey(i);
2747 : transitions->SetKey(transition_index, key);
2748 : Object** key_slot = transitions->GetKeySlot(transition_index);
2749 : RecordSlot(transitions, key_slot, key);
2750 18469 : Object* raw_target = transitions->GetRawTarget(i);
2751 : transitions->SetTarget(transition_index, raw_target);
2752 : Object** target_slot = transitions->GetTargetSlot(transition_index);
2753 : RecordSlot(transitions, target_slot, raw_target);
2754 : }
2755 631528 : transition_index++;
2756 : }
2757 : }
2758 : // If there are no transitions to be cleared, return.
2759 518363 : if (transition_index == num_transitions) {
2760 : DCHECK(!descriptors_owner_died);
2761 : return false;
2762 : }
2763 : // Note that we never eliminate a transition array, though we might right-trim
2764 : // such that number_of_transitions() == 0. If this assumption changes,
2765 : // TransitionArray::Insert() will need to deal with the case that a transition
2766 : // array disappeared during GC.
2767 19770 : int trim = transitions->Capacity() - transition_index;
2768 19770 : if (trim > 0) {
2769 : heap_->RightTrimFixedArray(transitions,
2770 19770 : trim * TransitionArray::kTransitionSize);
2771 : transitions->SetNumberOfTransitions(transition_index);
2772 : }
2773 19770 : return descriptors_owner_died;
2774 : }
2775 :
2776 :
     : // Right-trims |descriptors| down to |map|'s own descriptors, shrinks the
     : : // enum cache accordingly, and re-sorts; |map| becomes the owner.
2777 3193 : void MarkCompactCollector::TrimDescriptorArray(Map* map,
2778 : DescriptorArray* descriptors) {
2779 : int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2780 3193 : if (number_of_own_descriptors == 0) {
2781 : DCHECK(descriptors == heap_->empty_descriptor_array());
2782 3193 : return;
2783 : }
2784 :
2785 : int number_of_descriptors = descriptors->number_of_descriptors_storage();
2786 3051 : int to_trim = number_of_descriptors - number_of_own_descriptors;
2787 3051 : if (to_trim > 0) {
2788 : heap_->RightTrimFixedArray(descriptors,
2789 2797 : to_trim * DescriptorArray::kEntrySize);
2790 : descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2791 :
2792 2797 : TrimEnumCache(map, descriptors);
2793 2797 : descriptors->Sort();
2794 :
2795 : if (FLAG_unbox_double_fields) {
     : // Keep the unboxed-double layout descriptor in sync with the
     : // trimmed descriptor array.
2796 : LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2797 : layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2798 2797 : number_of_own_descriptors);
2799 : SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2800 : }
2801 : }
2802 : DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2803 : map->set_owns_descriptors(true);
2804 : }
2805 :
2806 :
     : // Shrinks the enum cache (keys and indices arrays) of |descriptors| to
     : // the number of enumerable properties still live on |map|.
2807 2797 : void MarkCompactCollector::TrimEnumCache(Map* map,
2808 : DescriptorArray* descriptors) {
2809 : int live_enum = map->EnumLength();
2810 2797 : if (live_enum == kInvalidEnumCacheSentinel) {
2811 2756 : live_enum = map->NumberOfEnumerableProperties();
2812 : }
2813 2797 : if (live_enum == 0) return descriptors->ClearEnumCache();
2814 : EnumCache* enum_cache = descriptors->GetEnumCache();
2815 :
2816 : FixedArray* keys = enum_cache->keys();
2817 2707 : int to_trim = keys->length() - live_enum;
2818 2707 : if (to_trim <= 0) return;
2819 37 : heap_->RightTrimFixedArray(keys, to_trim);
2820 :
     : // The indices array may be shorter than keys; trim it independently.
2821 : FixedArray* indices = enum_cache->indices();
2822 37 : to_trim = indices->length() - live_enum;
2823 37 : if (to_trim <= 0) return;
2824 34 : heap_->RightTrimFixedArray(indices, to_trim);
2825 : }
2826 :
2827 :
     : // Ephemeron step: for every encountered JSWeakCollection, marks values
     : // whose keys are already marked (and records the key/value slots).
     : // Called repeatedly until the ephemeron fixed point is reached.
2828 113657 : void MarkCompactCollector::ProcessWeakCollections() {
2829 113657 : MarkCompactMarkingVisitor visitor(this, atomic_marking_state());
2830 113657 : Object* weak_collection_obj = heap()->encountered_weak_collections();
2831 245448 : while (weak_collection_obj != Smi::kZero) {
2832 : JSWeakCollection* weak_collection =
2833 : reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2834 : DCHECK(non_atomic_marking_state()->IsBlackOrGrey(weak_collection));
2835 18134 : if (weak_collection->table()->IsHashTable()) {
2836 : ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2837 198132 : for (int i = 0; i < table->Capacity(); i++) {
2838 : HeapObject* heap_object = HeapObject::cast(table->KeyAt(i));
2839 80936 : if (non_atomic_marking_state()->IsBlackOrGrey(heap_object)) {
2840 : Object** key_slot =
2841 : table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2842 80542 : RecordSlot(table, key_slot, *key_slot);
2843 : Object** value_slot =
2844 : table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2845 77058 : visitor.VisitPointer(table, value_slot);
2846 : }
2847 : }
2848 : }
2849 : weak_collection_obj = weak_collection->next();
2850 : }
2851 113657 : }
2852 :
2853 :
     : // Final ephemeron cleanup: removes entries with dead keys from all
     : // encountered JSWeakCollections and unlinks the encountered list.
2854 56800 : void MarkCompactCollector::ClearWeakCollections() {
2855 349838 : TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2856 56800 : Object* weak_collection_obj = heap()->encountered_weak_collections();
2857 122638 : while (weak_collection_obj != Smi::kZero) {
2858 : JSWeakCollection* weak_collection =
2859 : reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2860 : DCHECK(non_atomic_marking_state()->IsBlackOrGrey(weak_collection));
2861 9038 : if (weak_collection->table()->IsHashTable()) {
2862 : ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2863 98240 : for (int i = 0; i < table->Capacity(); i++) {
2864 : HeapObject* key = HeapObject::cast(table->KeyAt(i));
2865 40084 : if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
2866 90 : table->RemoveEntry(i);
2867 : }
2868 : }
2869 : }
2870 : weak_collection_obj = weak_collection->next();
2871 9038 : weak_collection->set_next(heap()->undefined_value());
2872 : }
2873 56800 : heap()->set_encountered_weak_collections(Smi::kZero);
2874 56800 : }
2875 :
2876 :
     : // Aborted-GC path: unlinks the encountered-weak-collections list without
     : // touching table contents.
2877 912 : void MarkCompactCollector::AbortWeakCollections() {
2878 1824 : Object* weak_collection_obj = heap()->encountered_weak_collections();
2879 1824 : while (weak_collection_obj != Smi::kZero) {
2880 : JSWeakCollection* weak_collection =
2881 : reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2882 : weak_collection_obj = weak_collection->next();
2883 0 : weak_collection->set_next(heap()->undefined_value());
2884 : }
2885 : heap()->set_encountered_weak_collections(Smi::kZero);
2886 912 : }
2887 :
     : // Drains the weak-cell worklist. Cells whose value died are cleared
     : // (with special handling for Cells and for Maps, whose simple
     : // transitions are also cleared and whose dependent code is chained into
     : : // |dependent_code_list| for later deoptimization); live values get
     : // their slots re-recorded.
2888 56800 : void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
2889 : DependentCode** dependent_code_list) {
2890 113600 : Heap* heap = this->heap();
2891 227200 : TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
2892 : DependentCode* dependent_code_head =
2893 : DependentCode::cast(heap->empty_fixed_array());
2894 : WeakCell* weak_cell;
2895 10265915 : while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
2896 : // We do not insert cleared weak cells into the list, so the value
2897 : // cannot be a Smi here.
2898 10152315 : HeapObject* value = HeapObject::cast(weak_cell->value());
2899 10152315 : if (!non_atomic_marking_state()->IsBlackOrGrey(value)) {
2900 : // Cells for new-space objects embedded in optimized code are wrapped in
2901 : // WeakCell and put into Heap::weak_object_to_code_table.
2902 : // Such cells do not have any strong references but we want to keep them
2903 : // alive as long as the cell value is alive.
2904 : // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2905 2673152 : if (value->IsCell()) {
2906 : Object* cell_value = Cell::cast(value)->value();
2907 674988 : if (cell_value->IsHeapObject() &&
2908 : non_atomic_marking_state()->IsBlackOrGrey(
2909 : HeapObject::cast(cell_value))) {
2910 : // Resurrect the cell.
2911 : non_atomic_marking_state()->WhiteToBlack(value);
2912 132 : Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2913 132 : RecordSlot(value, slot, *slot);
2914 132 : slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2915 132 : RecordSlot(weak_cell, slot, *slot);
2916 : } else {
2917 337362 : weak_cell->clear();
2918 : }
2919 2335658 : } else if (value->IsMap()) {
2920 : // The map is non-live.
2921 : Map* map = Map::cast(value);
2922 : // Add dependent code to the dependent_code_list.
2923 : DependentCode* candidate = map->dependent_code();
2924 : // We rely on the fact that the weak code group comes first.
2925 : STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2926 361829 : if (candidate->length() > 0 &&
2927 : candidate->group() == DependentCode::kWeakCodeGroup) {
2928 : candidate->set_next_link(dependent_code_head);
2929 : dependent_code_head = candidate;
2930 : }
2931 344313 : ClearSimpleMapTransition(weak_cell, map);
2932 344313 : weak_cell->clear();
2933 : } else {
2934 : // All other objects.
2935 1991345 : weak_cell->clear();
2936 : }
2937 : } else {
2938 : // The value of the weak cell is alive.
2939 7479163 : Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2940 7479163 : RecordSlot(weak_cell, slot, *slot);
2941 : }
2942 : }
2943 113600 : *dependent_code_list = dependent_code_head;
2944 56800 : }
2945 :
     : // Aborted-GC path: discards all pending weak-cell and transition-array
     : // work without processing it.
2946 0 : void MarkCompactCollector::AbortWeakObjects() {
2947 54277 : weak_objects_.weak_cells.Clear();
2948 54277 : weak_objects_.transition_arrays.Clear();
2949 0 : }
2950 :
     : // Records a typed OLD_TO_OLD slot for a relocation entry in |host| that
     : // points at an evacuation candidate, so the slot can be updated after
     : // the target moves. Skips pages that opt out of slot recording.
2951 203943803 : void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
2952 : Object* target) {
2953 : Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2954 : Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
2955 407788999 : if (target_page->IsEvacuationCandidate() &&
2956 33221 : (rinfo->host() == nullptr ||
2957 : !source_page->ShouldSkipEvacuationSlotRecording())) {
2958 : RelocInfo::Mode rmode = rinfo->rmode();
2959 : Address addr = rinfo->pc();
2960 : SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
2961 32693 : if (rinfo->IsInConstantPool()) {
     : // For constant-pool entries the slot lives in the pool, not at pc.
2962 : addr = rinfo->constant_pool_entry_address();
2963 : if (RelocInfo::IsCodeTarget(rmode)) {
2964 : slot_type = CODE_ENTRY_SLOT;
2965 : } else {
2966 : DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2967 : slot_type = OBJECT_SLOT;
2968 : }
2969 : }
2970 : RememberedSet<OLD_TO_OLD>::InsertTyped(
2971 32693 : source_page, reinterpret_cast<Address>(host), slot_type, addr);
2972 : }
2973 203877889 : }
2974 :
2975 : template <AccessMode access_mode>
2976 404900444 : static inline SlotCallbackResult UpdateSlot(Object** slot) {
2977 404900444 : Object* obj = *slot;
2978 404900444 : if (obj->IsHeapObject()) {
2979 : HeapObject* heap_obj = HeapObject::cast(obj);
2980 : MapWord map_word = heap_obj->map_word();
2981 365238884 : if (map_word.IsForwardingAddress()) {
2982 : DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
2983 : MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2984 : Page::FromAddress(heap_obj->address())
2985 : ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2986 19546338 : HeapObject* target = map_word.ToForwardingAddress();
2987 : if (access_mode == AccessMode::NON_ATOMIC) {
2988 19546338 : *slot = target;
2989 : } else {
2990 : base::AsAtomicPointer::Release_CompareAndSwap(slot, obj, target);
2991 : }
2992 : DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
2993 : DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
2994 : }
2995 : }
2996 : // OLD_TO_OLD slots are always removed after updating.
2997 404900444 : return REMOVE_SLOT;
2998 : }
2999 :
3000 : // Visitor for updating root pointers and to-space pointers.
3001 : // It does not expect to encounter pointers to dead objects.
3002 : // TODO(ulan): Remove code object specific functions. This visitor
3003 : // never visits code objects.
3004 56800 : class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
3005 : public:
3006 26604 : void VisitPointer(HeapObject* host, Object** p) override {
3007 : UpdateSlotInternal(p);
3008 26604 : }
3009 :
3010 16165482 : void VisitPointers(HeapObject* host, Object** start, Object** end) override {
3011 167107425 : for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
3012 16281866 : }
3013 :
3014 168821750 : void VisitRootPointer(Root root, Object** p) override {
3015 : UpdateSlotInternal(p);
3016 168821750 : }
3017 :
3018 860112 : void VisitRootPointers(Root root, Object** start, Object** end) override {
3019 77909994 : for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
3020 860112 : }
3021 :
     : // Code-object hooks are unreachable in practice (see class comment);
     : // kept to satisfy the ObjectVisitor interface.
3022 0 : void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
3023 0 : UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlotInternal);
3024 0 : }
3025 :
3026 0 : void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
3027 0 : UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlotInternal);
3028 0 : }
3029 :
3030 : private:
     : // Main thread only, hence the non-atomic slot update.
3031 0 : static inline SlotCallbackResult UpdateSlotInternal(Object** slot) {
3032 396723795 : return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
3033 : }
3034 : };
3035 :
     : // External-string-table callback: returns the new location of *p if it
     : // was evacuated (forwarding address set), otherwise the string itself.
3036 619875 : static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
3037 : Object** p) {
3038 619875 : MapWord map_word = HeapObject::cast(*p)->map_word();
3039 :
3040 619875 : if (map_word.IsForwardingAddress()) {
3041 1712 : return String::cast(map_word.ToForwardingAddress());
3042 : }
3043 :
3044 618163 : return String::cast(*p);
3045 : }
3046 :
     : // Prepares evacuation: snapshots the new-space pages to process, flips
     : // the semispaces, and takes over the old-space evacuation candidates.
3047 56800 : void MarkCompactCollector::EvacuatePrologue() {
3048 : // New space.
3049 56800 : NewSpace* new_space = heap()->new_space();
3050 : // Append the list of new space pages to be processed.
3051 308316 : for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
3052 68958 : new_space_evacuation_pages_.push_back(p);
3053 : }
3054 56800 : new_space->Flip();
3055 56800 : new_space->ResetAllocationInfo();
3056 :
3057 : // Old space.
3058 : DCHECK(old_space_evacuation_pages_.empty());
3059 56800 : old_space_evacuation_pages_ = std::move(evacuation_candidates_);
     : // clear() after move: moved-from vector state is unspecified.
3060 : evacuation_candidates_.clear();
3061 : DCHECK(evacuation_candidates_.empty());
3062 56800 : }
3063 :
     : // Post-evacuation cleanup: advances the new-space age mark, frees dead
     : // large objects and evacuated candidate pages, and returns queued
     : // memory chunks to the OS.
3064 56800 : void MarkCompactCollector::EvacuateEpilogue() {
3065 : aborted_evacuation_candidates_.clear();
3066 : // New space.
3067 170400 : heap()->new_space()->set_age_mark(heap()->new_space()->top());
3068 : // Deallocate unmarked large objects.
3069 56800 : heap()->lo_space()->FreeUnmarkedObjects();
3070 : // Old space. Deallocate evacuated candidate pages.
3071 56800 : ReleaseEvacuationCandidates();
3072 : // Give pages that are queued to be freed back to the OS.
3073 56800 : heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3074 : #ifdef DEBUG
3075 : // Old-to-old slot sets must be empty after evacuation.
3076 : for (Page* p : *heap()->old_space()) {
3077 : DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
3078 : DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
3079 : DCHECK_NULL(p->invalidated_slots());
3080 : }
3081 : #endif
3082 56800 : }
3083 :
     : // Base class for page evacuators. Holds the per-evacuator (possibly
     : // per-thread) allocator, pretenuring feedback, and space-specific
     : // visitors; subclasses implement RawEvacuatePage for full vs. minor GC.
3083 : class Evacuator : public Malloced {
3084 : public:
3085 : enum EvacuationMode {
3086 : kObjectsNewToOld,
3087 : kPageNewToOld,
3088 : kObjectsOldToOld,
3089 : kPageNewToNew,
3090 : };
3091 :
3092 : static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3093 : // Note: The order of checks is important in this function.
3094 130444 : if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3095 : return kPageNewToOld;
3096 129286 : if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
3097 : return kPageNewToNew;
3098 127298 : if (chunk->InNewSpace()) return kObjectsNewToOld;
3099 : return kObjectsOldToOld;
3100 : }
3101 :
3102 : // NewSpacePages with more live bytes than this threshold qualify for fast
3103 : // evacuation.
3104 : static int PageEvacuationThreshold() {
3105 46927 : if (FLAG_page_promotion)
3106 46893 : return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
     : // Promotion disabled: threshold above any possible live-byte count.
3107 : return Page::kAllocatableMemory + kPointerSize;
3108 : }
3109 :
3110 56062 : Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
3111 : : heap_(heap),
3112 : local_allocator_(heap_),
3113 : compaction_spaces_(heap_),
3114 : local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
3115 : new_space_visitor_(heap_, &local_allocator_, record_visitor,
3116 : &local_pretenuring_feedback_),
3117 : new_to_new_page_visitor_(heap_, record_visitor,
3118 : &local_pretenuring_feedback_),
3119 : new_to_old_page_visitor_(heap_, record_visitor,
3120 : &local_pretenuring_feedback_),
3121 :
3122 : old_space_visitor_(heap_, &local_allocator_, record_visitor),
3123 : duration_(0.0),
3124 168186 : bytes_compacted_(0) {}
3125 :
3126 168186 : virtual ~Evacuator() {}
3127 :
3128 : void EvacuatePage(Page* page);
3129 :
3130 256 : void AddObserver(MigrationObserver* observer) {
3131 : new_space_visitor_.AddObserver(observer);
3132 : old_space_visitor_.AddObserver(observer);
3133 256 : }
3134 :
3135 : // Merge back locally cached info sequentially. Note that this method needs
3136 : // to be called from the main thread.
3137 : inline void Finalize();
3138 :
3139 : protected:
3140 : static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3141 :
3142 : // |saved_live_bytes| returns the live bytes of the page that was processed.
3143 : virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
3144 :
3145 : inline Heap* heap() { return heap_; }
3146 :
3147 : void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3148 57764 : duration_ += duration;
3149 57764 : bytes_compacted_ += bytes_compacted;
3150 : }
3151 :
3152 : Heap* heap_;
3153 :
3154 : // Locally cached collector data.
3155 : LocalAllocator local_allocator_;
3156 : CompactionSpaceCollection compaction_spaces_;
3157 : Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
3158 :
3159 : // Visitors for the corresponding spaces.
3160 : EvacuateNewSpaceVisitor new_space_visitor_;
3161 : EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
3162 : new_to_new_page_visitor_;
3163 : EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
3164 : new_to_old_page_visitor_;
3165 : EvacuateOldSpaceVisitor old_space_visitor_;
3166 :
3167 : // Book keeping info.
3168 : double duration_;
3169 : intptr_t bytes_compacted_;
3170 : };
3172 :
     : // Evacuates one page (timed), accumulates compaction statistics, and
     : // optionally traces the result when --trace-evacuation is on.
3173 115476 : void Evacuator::EvacuatePage(Page* page) {
3174 : DCHECK(page->SweepingDone());
3175 57738 : intptr_t saved_live_bytes = 0;
3176 57738 : double evacuation_time = 0.0;
3177 : {
     : // Evacuation must not fail on allocation; force allocation to succeed.
3178 : AlwaysAllocateScope always_allocate(heap()->isolate());
3179 : TimedScope timed_scope(&evacuation_time);
3180 57761 : RawEvacuatePage(page, &saved_live_bytes);
3181 : }
3182 57764 : ReportCompactionProgress(evacuation_time, saved_live_bytes);
3183 57764 : if (FLAG_trace_evacuation) {
3184 : PrintIsolate(
3185 : heap()->isolate(),
3186 : "evacuation[%p]: page=%p new_space=%d "
3187 : "page_evacuation=%d executable=%d contains_age_mark=%d "
3188 : "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
3189 : static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
3190 0 : page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3191 : page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3192 : page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3193 0 : page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
3194 0 : evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
3195 : }
3196 57764 : }
3197 :
     : // Main-thread merge of this evacuator's locally cached results into the
     : // heap: allocator state, compaction stats, promoted/copied byte
     : // counters, survivor counts, and pretenuring feedback.
3198 336372 : void Evacuator::Finalize() {
3199 56062 : local_allocator_.Finalize();
3200 112124 : heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3201 224248 : heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3202 168186 : new_to_old_page_visitor_.moved_bytes());
3203 : heap()->IncrementSemiSpaceCopiedObjectSize(
3204 56062 : new_space_visitor_.semispace_copied_size() +
3205 168186 : new_to_new_page_visitor_.moved_bytes());
3206 : heap()->IncrementYoungSurvivorsCounter(
3207 56062 : new_space_visitor_.promoted_size() +
3208 56062 : new_space_visitor_.semispace_copied_size() +
3209 56062 : new_to_old_page_visitor_.moved_bytes() +
3210 56062 : new_to_new_page_visitor_.moved_bytes());
3211 112124 : heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3212 56062 : }
3213 :
     : // Evacuator for the full (major) mark-compact collector; dispatches on
     : // the page's evacuation mode in RawEvacuatePage.
3214 112124 : class FullEvacuator : public Evacuator {
3215 : public:
3216 : FullEvacuator(MarkCompactCollector* collector,
3217 : RecordMigratedSlotVisitor* record_visitor)
3218 56062 : : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
3219 :
3220 : protected:
3221 : void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
3222 :
3223 : MarkCompactCollector* collector_;
3224 : };
3225 :
     : // Evacuates one page according to its mode: copies objects out
     : // (new->old, old->old) or promotes the whole page in place
     : // (new->old, new->new). Old-space compaction may abort; the failed
     : // page is reported and handled later on the main thread.
3226 117048 : void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
3227 : MarkCompactCollector::NonAtomicMarkingState* marking_state =
3228 57738 : collector_->non_atomic_marking_state();
3229 57738 : *live_bytes = marking_state->live_bytes(page);
3230 57738 : HeapObject* failed_object = nullptr;
3231 57738 : switch (ComputeEvacuationMode(page)) {
3232 : case kObjectsNewToOld:
3233 : LiveObjectVisitor::VisitBlackObjectsNoFail(
3234 : page, marking_state, &new_space_visitor_,
3235 52423 : LiveObjectVisitor::kClearMarkbits);
3236 : // ArrayBufferTracker will be updated during pointers updating.
3237 52445 : break;
3238 : case kPageNewToOld:
3239 : LiveObjectVisitor::VisitBlackObjectsNoFail(
3240 : page, marking_state, &new_to_old_page_visitor_,
3241 576 : LiveObjectVisitor::kKeepMarking);
3242 : new_to_old_page_visitor_.account_moved_bytes(
3243 : marking_state->live_bytes(page));
3244 : // ArrayBufferTracker will be updated during sweeping.
3245 : break;
3246 : case kPageNewToNew:
3247 : LiveObjectVisitor::VisitBlackObjectsNoFail(
3248 : page, marking_state, &new_to_new_page_visitor_,
3249 992 : LiveObjectVisitor::kKeepMarking);
3250 : new_to_new_page_visitor_.account_moved_bytes(
3251 : marking_state->live_bytes(page));
3252 : // ArrayBufferTracker will be updated during sweeping.
3253 : break;
3254 : case kObjectsOldToOld: {
3255 : const bool success = LiveObjectVisitor::VisitBlackObjects(
3256 : page, marking_state, &old_space_visitor_,
3257 3741 : LiveObjectVisitor::kClearMarkbits, &failed_object);
3258 3748 : if (!success) {
3259 : // Aborted compaction page. Actual processing happens on the main
3260 : // thread for simplicity reasons.
3261 28 : collector_->ReportAbortedEvacuationCandidate(failed_object, page);
3262 : } else {
3263 : // ArrayBufferTracker will be updated during pointers updating.
3264 : }
3265 : break;
3266 : }
3267 : }
3268 57771 : }
3269 :
// Evacuator used by the minor (young-generation) mark-compact collector.
// Unlike FullEvacuator, its RawEvacuatePage visits grey objects, since the
// minor collector marks live young objects grey rather than black.
class YoungGenerationEvacuator : public Evacuator {
 public:
  YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
                           RecordMigratedSlotVisitor* record_visitor)
      : Evacuator(collector->heap(), record_visitor), collector_(collector) {}

 protected:
  void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;

  // Owning collector; used to reach its marking state and MakeIterable.
  MinorMarkCompactCollector* collector_;
};
3281 :
// Young-generation counterpart of FullEvacuator::RawEvacuatePage: evacuates
// grey (minor-GC-live) objects on |page| and reports the page's live byte
// count through |live_bytes|. Old-to-old compaction never happens here.
// NOTE(review): the kPageNewToOld and kPageNewToNew arms are intentionally
// parallel; keep them in sync when modifying either.
void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
                                               intptr_t* live_bytes) {
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
      collector_->non_atomic_marking_state();
  *live_bytes = marking_state->live_bytes(page);
  switch (ComputeEvacuationMode(page)) {
    case kObjectsNewToOld:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          page, marking_state, &new_space_visitor_,
          LiveObjectVisitor::kClearMarkbits);
      // ArrayBufferTracker will be updated during pointers updating.
      break;
    case kPageNewToOld:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          page, marking_state, &new_to_old_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_old_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(page));
      // TODO(mlippautz): If cleaning array buffers is too slow here we can
      // delay it until the next GC.
      ArrayBufferTracker::FreeDead(page, marking_state);
      if (heap()->ShouldZapGarbage()) {
        collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
                                 ZAP_FREE_SPACE);
      } else if (heap()->incremental_marking()->IsMarking()) {
        // When incremental marking is on, we need to clear the mark bits of
        // the full collector. We cannot yet discard the young generation mark
        // bits as they are still relevant for pointers updating.
        collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
                                 IGNORE_FREE_SPACE);
      }
      break;
    case kPageNewToNew:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          page, marking_state, &new_to_new_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_new_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(page));
      // TODO(mlippautz): If cleaning array buffers is too slow here we can
      // delay it until the next GC.
      ArrayBufferTracker::FreeDead(page, marking_state);
      if (heap()->ShouldZapGarbage()) {
        collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
                                 ZAP_FREE_SPACE);
      } else if (heap()->incremental_marking()->IsMarking()) {
        // When incremental marking is on, we need to clear the mark bits of
        // the full collector. We cannot yet discard the young generation mark
        // bits as they are still relevant for pointers updating.
        collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
                                 IGNORE_FREE_SPACE);
      }
      break;
    case kObjectsOldToOld:
      // The minor collector never compacts old space.
      UNREACHABLE();
      break;
  }
}
3339 :
3340 : class PageEvacuationItem : public ItemParallelJob::Item {
3341 : public:
3342 57765 : explicit PageEvacuationItem(Page* page) : page_(page) {}
3343 115530 : virtual ~PageEvacuationItem() {}
3344 : Page* page() const { return page_; }
3345 :
3346 : private:
3347 : Page* page_;
3348 : };
3349 :
3350 112115 : class PageEvacuationTask : public ItemParallelJob::Task {
3351 : public:
3352 : PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
3353 56062 : : ItemParallelJob::Task(isolate), evacuator_(evacuator) {}
3354 :
3355 55564 : void RunInParallel() override {
3356 57758 : PageEvacuationItem* item = nullptr;
3357 168893 : while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
3358 57758 : evacuator_->EvacuatePage(item->page());
3359 57764 : item->MarkFinished();
3360 : }
3361 55582 : };
3362 :
3363 : private:
3364 : Evacuator* evacuator_;
3365 : };
3366 :
// Creates one evacuator per wanted task, attaches the profiling observer
// (when any profiler/logger is active) and the optional |migration_observer|,
// runs |job| to completion, then finalizes and destroys the evacuators.
// |record_visitor| is shared by all evacuators; |live_bytes| is only used
// for the optional trace summary.
template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
    Collector* collector, ItemParallelJob* job,
    RecordMigratedSlotVisitor* record_visitor,
    MigrationObserver* migration_observer, const intptr_t live_bytes) {
  // Used for trace summary.
  double compaction_speed = 0;
  if (FLAG_trace_evacuation) {
    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
  }

  // Object moves must be reported when any consumer of move events is active.
  const bool profiling =
      heap()->isolate()->is_profiling() ||
      heap()->isolate()->logger()->is_logging_code_events() ||
      heap()->isolate()->heap_profiler()->is_tracking_object_moves();
  ProfilingMigrationObserver profiling_observer(heap());

  const int wanted_num_tasks =
      NumberOfParallelCompactionTasks(job->NumberOfItems());
  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
  for (int i = 0; i < wanted_num_tasks; i++) {
    evacuators[i] = new Evacuator(collector, record_visitor);
    if (profiling) evacuators[i]->AddObserver(&profiling_observer);
    if (migration_observer != nullptr)
      evacuators[i]->AddObserver(migration_observer);
    // The job takes ownership of the task; the evacuator stays owned here.
    job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
  }
  job->Run();
  // Finalize before deletion so per-evacuator results are merged back.
  for (int i = 0; i < wanted_num_tasks; i++) {
    evacuators[i]->Finalize();
    delete evacuators[i];
  }
  delete[] evacuators;

  if (FLAG_trace_evacuation) {
    PrintIsolate(isolate(),
                 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
                 "wanted_tasks=%d tasks=%d cores=%" PRIuS
                 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
                 isolate()->time_millis_since_init(),
                 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
                 wanted_num_tasks, job->NumberOfTasks(),
                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
                 live_bytes, compaction_speed);
  }
}
3413 :
3414 54017 : bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
3415 54017 : const bool reduce_memory = heap()->ShouldReduceMemory();
3416 54017 : const Address age_mark = heap()->new_space()->age_mark();
3417 93854 : return !reduce_memory && !p->NeverEvacuate() &&
3418 48960 : (live_bytes > Evacuator::PageEvacuationThreshold()) &&
3419 57171 : !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
3420 : }
3421 :
// Builds the parallel evacuation job from old-space evacuation candidates
// and live new-space pages, promotes whole new-space pages where profitable,
// runs the job with FullEvacuator tasks, and post-processes aborted
// candidates.
void MarkCompactCollector::EvacuatePagesInParallel() {
  ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);
  intptr_t live_bytes = 0;

  for (Page* page : old_space_evacuation_pages_) {
    live_bytes += non_atomic_marking_state()->live_bytes(page);
    evacuation_job.AddItem(new PageEvacuationItem(page));
  }

  for (Page* page : new_space_evacuation_pages_) {
    intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
    // Empty pages are skipped unless they still track array buffers.
    if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
    live_bytes += live_bytes_on_page;
    if (ShouldMovePage(page, live_bytes_on_page)) {
      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
        // Pages fully below the age mark are promoted to old space in place.
        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
        DCHECK_EQ(heap()->old_space(), page->owner());
        // The move added page->allocated_bytes to the old space, but we are
        // going to sweep the page and add page->live_byte_count.
        heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
                                                    page);
      } else {
        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
      }
    }
    // Even moved pages get an item: their objects still need visiting.
    evacuation_job.AddItem(new PageEvacuationItem(page));
  }
  if (evacuation_job.NumberOfItems() == 0) return;

  RecordMigratedSlotVisitor record_visitor(this);
  CreateAndExecuteEvacuationTasks<FullEvacuator>(
      this, &evacuation_job, &record_visitor, nullptr, live_bytes);
  PostProcessEvacuationCandidates();
}
3457 :
// Young-generation variant: builds the evacuation job from new-space pages
// only (the minor collector never compacts old space) and runs it with
// YoungGenerationEvacuator tasks plus a migration observer that forwards
// moves to the full collector.
void MinorMarkCompactCollector::EvacuatePagesInParallel() {
  ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);
  intptr_t live_bytes = 0;

  for (Page* page : new_space_evacuation_pages_) {
    intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
    // Empty pages are skipped unless they still track array buffers.
    if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
    live_bytes += live_bytes_on_page;
    if (ShouldMovePage(page, live_bytes_on_page)) {
      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
      } else {
        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
      }
    }
    evacuation_job.AddItem(new PageEvacuationItem(page));
  }
  if (evacuation_job.NumberOfItems() == 0) return;

  YoungGenerationMigrationObserver observer(heap(),
                                            heap()->mark_compact_collector());
  YoungGenerationRecordMigratedSlotVisitor record_visitor(
      heap()->mark_compact_collector());
  CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
      this, &evacuation_job, &record_visitor, &observer, live_bytes);
}
3485 :
3486 56800 : class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3487 : public:
3488 113600 : virtual Object* RetainAs(Object* object) {
3489 113600 : if (object->IsHeapObject()) {
3490 : HeapObject* heap_object = HeapObject::cast(object);
3491 : MapWord map_word = heap_object->map_word();
3492 113600 : if (map_word.IsForwardingAddress()) {
3493 759 : return map_word.ToForwardingAddress();
3494 : }
3495 : }
3496 112841 : return object;
3497 : }
3498 : };
3499 :
// Sweeps page |p|: walks its black (live) objects, turns the gaps between
// them into free-list entries (REBUILD_FREE_LIST) or filler objects
// (IGNORE_FREE_LIST), removes remembered-set slots falling into freed
// ranges, optionally zaps freed memory, and finally clears the marking
// bitmap. Returns the guaranteed-allocatable size of the largest freed
// chunk, or 0 when the free list is ignored.
int MarkCompactCollector::Sweeper::RawSweep(
    Page* p, FreeListRebuildingMode free_list_mode,
    FreeSpaceTreatmentMode free_space_mode) {
  Space* space = p->owner();
  DCHECK_NOT_NULL(space);
  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

  // TODO(ulan): we don't have to clear type old-to-old slots in code space
  // because the concurrent marker doesn't mark code objects. This requires
  // the write barrier for code objects to check the color of the code object.
  bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
                               p->typed_slot_set<OLD_TO_OLD>() != nullptr;

  // The free ranges map is used for filtering typed slots.
  std::map<uint32_t, uint32_t> free_ranges;

  // Before we sweep objects on the page, we free dead array buffers which
  // requires valid mark bits.
  ArrayBufferTracker::FreeDead(p, marking_state_);

  Address free_start = p->area_start();
  DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));

  // If we use the skip list for code space pages, we have to lock the skip
  // list because it could be accessed concurrently by the runtime or the
  // deoptimizer.
  const bool rebuild_skip_list =
      space->identity() == CODE_SPACE && p->skip_list() != nullptr;
  SkipList* skip_list = p->skip_list();
  if (rebuild_skip_list) {
    skip_list->Clear();
  }

  intptr_t live_bytes = 0;
  intptr_t freed_bytes = 0;
  intptr_t max_freed_bytes = 0;
  int curr_region = -1;

  // Set the allocated_bytes counter to area_size. The free operations below
  // will decrease the counter to actual live bytes.
  p->ResetAllocatedBytes();

  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state_->IsBlack(object));
    Address free_end = object->address();
    // Any gap between the previous live object and this one is dead space.
    if (free_end != free_start) {
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      if (free_space_mode == ZAP_FREE_SPACE) {
        memset(free_start, 0xcc, size);
      }
      if (free_list_mode == REBUILD_FREE_LIST) {
        freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
            free_start, size);
        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
      } else {
        p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                        ClearRecordedSlots::kNo);
      }
      // Slots pointing into the freed range are stale; drop them.
      RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      if (non_empty_typed_slots) {
        free_ranges.insert(std::pair<uint32_t, uint32_t>(
            static_cast<uint32_t>(free_start - p->address()),
            static_cast<uint32_t>(free_end - p->address())));
      }
    }
    Map* map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    live_bytes += size;
    if (rebuild_skip_list) {
      int new_region_start = SkipList::RegionNumber(free_end);
      int new_region_end =
          SkipList::RegionNumber(free_end + size - kPointerSize);
      if (new_region_start != curr_region || new_region_end != curr_region) {
        skip_list->AddObject(free_end, size);
        curr_region = new_region_end;
      }
    }
    free_start = free_end + size;
  }

  // Handle the tail of the page after the last live object.
  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    if (free_space_mode == ZAP_FREE_SPACE) {
      memset(free_start, 0xcc, size);
    }
    if (free_list_mode == REBUILD_FREE_LIST) {
      freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
          free_start, size);
      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    } else {
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo);
    }

    RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    if (non_empty_typed_slots) {
      free_ranges.insert(std::pair<uint32_t, uint32_t>(
          static_cast<uint32_t>(free_start - p->address()),
          static_cast<uint32_t>(p->area_end() - p->address())));
    }
  }

  // Clear invalid typed slots after collection all free ranges.
  if (!free_ranges.empty()) {
    TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
    if (old_to_new != nullptr) {
      old_to_new->RemoveInvaldSlots(free_ranges);
    }
    TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
    if (old_to_old != nullptr) {
      old_to_old->RemoveInvaldSlots(free_ranges);
    }
  }

  marking_state_->bitmap(p)->Clear();
  if (free_list_mode == IGNORE_FREE_LIST) {
    marking_state_->SetLiveBytes(p, 0);
    // We did not free memory, so have to adjust allocated bytes here.
    intptr_t freed_bytes = p->area_size() - live_bytes;
    p->DecreaseAllocatedBytes(freed_bytes);
  } else {
    // Keep the old live bytes counter of the page until RefillFreeList, where
    // the space size is refined.
    // The allocated_bytes() counter is precisely the total size of objects.
    DCHECK_EQ(live_bytes, p->allocated_bytes());
  }
  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
  if (free_list_mode == IGNORE_FREE_LIST) return 0;
  return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
3642 :
3643 : // Return true if the given code is deoptimized or will be deoptimized.
3644 0 : bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3645 0 : return code->is_optimized_code() && code->marked_for_deoptimization();
3646 : }
3647 :
3648 0 : void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
3649 0 : EvacuateRecordOnlyVisitor visitor(heap());
3650 : LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
3651 : &visitor,
3652 0 : LiveObjectVisitor::kKeepMarking);
3653 0 : }
3654 :
// Visits all black objects on |chunk| with |visitor|. If the visitor rejects
// an object, visiting stops: with kClearMarkbits the mark bits of everything
// already processed (below the failing object) are cleared, the failing
// object is reported via |failed_object|, and false is returned. Returns
// true when every object was visited; with kClearMarkbits the chunk's
// liveness info is then fully cleared.
template <class Visitor, typename MarkingState>
bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
                                          MarkingState* marking_state,
                                          Visitor* visitor,
                                          IterationMode iteration_mode,
                                          HeapObject** failed_object) {
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    if (!visitor->Visit(object, object_and_size.second)) {
      if (iteration_mode == kClearMarkbits) {
        marking_state->bitmap(chunk)->ClearRange(
            chunk->AddressToMarkbitIndex(chunk->area_start()),
            chunk->AddressToMarkbitIndex(object->address()));
        *failed_object = object;
      }
      return false;
    }
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
  return true;
}
3679 :
// Visits all black objects on |chunk| with |visitor|, which must succeed on
// every object (checked in debug builds only). With kClearMarkbits the
// chunk's liveness information is cleared afterwards.
template <class Visitor, typename MarkingState>
void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
                                                MarkingState* marking_state,
                                                Visitor* visitor,
                                                IterationMode iteration_mode) {
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state->IsBlack(object));
    const bool success = visitor->Visit(object, object_and_size.second);
    USE(success);
    DCHECK(success);
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
}
3697 :
// Grey-object counterpart of VisitBlackObjectsNoFail, used by the minor
// collector (which marks live young objects grey). The visitor must succeed
// on every object; with kClearMarkbits liveness info is cleared afterwards.
template <class Visitor, typename MarkingState>
void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
                                               MarkingState* marking_state,
                                               Visitor* visitor,
                                               IterationMode iteration_mode) {
  for (auto object_and_size :
       LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state->IsGrey(object));
    const bool success = visitor->Visit(object, object_and_size.second);
    USE(success);
    DCHECK(success);
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
}
3715 :
// Recomputes |chunk|'s live byte counter from scratch by summing the sizes
// of all live (black or grey) objects in the marking bitmap.
template <typename MarkingState>
void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
                                           MarkingState* marking_state) {
  int new_live_size = 0;
  for (auto object_and_size :
       LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
    new_live_size += object_and_size.second;
  }
  marking_state->SetLiveBytes(chunk, new_live_size);
}
3726 :
// Appends |page| to |space|'s swept-page list under |mutex_|, making it safe
// to call concurrently with sweeper tasks.
void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
                                                     Page* page) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  swept_list_[space->identity()].push_back(page);
}
3732 :
// Top-level evacuation phase of the full mark-compact collector: prologue,
// parallel page evacuation, pointer updating, new-space rebalancing,
// clean-up of promoted/aborted pages, and epilogue. Ordering of the steps
// is significant (see inline comments).
void MarkCompactCollector::Evacuate() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
  base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
  CodeSpaceMemoryModificationScope code_modifcation(heap());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
    EvacuatePrologue();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
    EvacuationScope evacuation_scope(this);
    EvacuatePagesInParallel();
  }

  UpdatePointersAfterEvacuation();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
    if (!heap()->new_space()->Rebalance()) {
      FatalProcessOutOfMemory("NewSpace::Rebalance");
    }
  }

  // Give pages that are queued to be freed back to the OS. Note that filtering
  // slots only handles old space (for unboxed doubles), and thus map space can
  // still contain stale pointers. We only free the chunks after pointer updates
  // to still have access to page headers.
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);

    // Pages promoted wholesale still need sweeping; hand them to the sweeper
    // and drop the promotion flags.
    for (Page* p : new_space_evacuation_pages_) {
      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
        sweeper().AddPage(p->owner()->identity(), p);
      } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
        p->ForAllFreeListCategories(
            [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
        sweeper().AddPage(p->owner()->identity(), p);
      }
    }
    new_space_evacuation_pages_.clear();

    for (Page* p : old_space_evacuation_pages_) {
      // Important: skip list should be cleared only after roots were updated
      // because root iteration traverses the stack and might have to find
      // code objects from non-updated pc pointing into evacuation candidate.
      SkipList* list = p->skip_list();
      if (list != nullptr) list->Clear();
      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
        sweeper().AddPage(p->owner()->identity(), p);
        p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
      }
    }
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
    EvacuateEpilogue();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
    FullEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif
}
3805 :
// Base class for pointer-updating work items executed by
// PointersUpdatingTask; Process() performs the item's work.
class UpdatingItem : public ItemParallelJob::Item {
 public:
  virtual ~UpdatingItem() {}
  // Performs the actual pointer updating for this item.
  virtual void Process() = 0;
};
3811 :
3812 461208 : class PointersUpdatingTask : public ItemParallelJob::Task {
3813 : public:
3814 : explicit PointersUpdatingTask(Isolate* isolate)
3815 230853 : : ItemParallelJob::Task(isolate) {}
3816 :
3817 210317 : void RunInParallel() override {
3818 : UpdatingItem* item = nullptr;
3819 773662 : while ((item = GetItem<UpdatingItem>()) != nullptr) {
3820 352887 : item->Process();
3821 352918 : item->MarkFinished();
3822 : }
3823 210319 : };
3824 : };
3825 :
// Updating item for to-space pages: rewrites pointers inside the objects on
// [start_, end_). Regular pages are walked linearly; pages promoted
// new->new contain garbage between objects and must be walked via mark bits.
template <typename MarkingState>
class ToSpaceUpdatingItem : public UpdatingItem {
 public:
  explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
                               MarkingState* marking_state)
      : chunk_(chunk),
        start_(start),
        end_(end),
        marking_state_(marking_state) {}
  virtual ~ToSpaceUpdatingItem() {}

  void Process() override {
    if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
      // New->new promoted pages contain garbage so they require iteration
      // using markbits.
      ProcessVisitLive();
    } else {
      ProcessVisitAll();
    }
  }

 private:
  // Walks the address range object by object (valid only when the range
  // contains no garbage between objects).
  void ProcessVisitAll() {
    PointersUpdatingVisitor visitor;
    for (Address cur = start_; cur < end_;) {
      HeapObject* object = HeapObject::FromAddress(cur);
      Map* map = object->map();
      int size = object->SizeFromMap(map);
      object->IterateBody(map->instance_type(), size, &visitor);
      cur += size;
    }
  }

  // Walks only objects recorded live in the marking bitmap.
  void ProcessVisitLive() {
    // For young generation evacuations we want to visit grey objects, for
    // full MC, we need to visit black objects.
    PointersUpdatingVisitor visitor;
    for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
             chunk_, marking_state_->bitmap(chunk_))) {
      object_and_size.first->IterateBodyFast(&visitor);
    }
  }

  MemoryChunk* chunk_;
  Address start_;
  Address end_;
  MarkingState* marking_state_;
};
3874 :
// Updating item for one memory chunk's remembered sets: rewrites old-to-new
// and (when |updating_mode_| is ALL) old-to-old slots, both untyped and
// typed, dropping slots that no longer point at live objects. The chunk's
// mutex is held for the duration of Process().
template <typename MarkingState>
class RememberedSetUpdatingItem : public UpdatingItem {
 public:
  explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
                                     MemoryChunk* chunk,
                                     RememberedSetUpdatingMode updating_mode)
      : heap_(heap),
        marking_state_(marking_state),
        chunk_(chunk),
        updating_mode_(updating_mode) {}
  virtual ~RememberedSetUpdatingItem() {}

  void Process() override {
    base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
    UpdateUntypedPointers();
    UpdateTypedPointers();
  }

 private:
  // Updates one old-to-new slot: follows the forwarding pointer of a moved
  // from-space object, then decides whether the slot must stay in the
  // remembered set (target still in new space) or can be removed.
  template <AccessMode access_mode>
  inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InFromSpace(*slot)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
      DCHECK(heap_object->IsHeapObject());
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        if (access_mode == AccessMode::ATOMIC) {
          HeapObject** heap_obj_slot = reinterpret_cast<HeapObject**>(slot);
          base::AsAtomicPointer::Relaxed_Store(heap_obj_slot,
                                               map_word.ToForwardingAddress());
        } else {
          *slot = map_word.ToForwardingAddress();
        }
      }
      // If the object was in from space before and is after executing the
      // callback in to space, the object is still live.
      // Unfortunately, we do not know about the slot. It could be in a
      // just freed free space object.
      if (heap_->InToSpace(*slot)) {
        return KEEP_SLOT;
      }
    } else if (heap_->InToSpace(*slot)) {
      // Slots can point to "to" space if the page has been moved, or if the
      // slot has been recorded multiple times in the remembered set, or
      // if the slot was already updated during old->old updating.
      // In case the page has been moved, check markbits to determine liveness
      // of the slot. In the other case, the slot can just be kept.
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
      if (Page::FromAddress(heap_object->address())
              ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
        // IsBlackOrGrey is required because objects are marked as grey for
        // the young generation collector while they are black for the full
        // MC.
        if (marking_state_->IsBlackOrGrey(heap_object)) {
          return KEEP_SLOT;
        } else {
          return REMOVE_SLOT;
        }
      }
      return KEEP_SLOT;
    } else {
      DCHECK(!heap_->InNewSpace(*slot));
    }
    return REMOVE_SLOT;
  }

  // Processes the chunk's untyped slot sets and releases the invalidated
  // slots bookkeeping once old-to-old slots have been handled.
  void UpdateUntypedPointers() {
    if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
      RememberedSet<OLD_TO_NEW>::Iterate(
          chunk_,
          [this](Address slot) {
            return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(slot);
          },
          SlotSet::PREFREE_EMPTY_BUCKETS);
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
      // Skip slots that were invalidated by object resizing/right-trimming.
      InvalidatedSlotsFilter filter(chunk_);
      RememberedSet<OLD_TO_OLD>::Iterate(
          chunk_,
          [&filter](Address slot) {
            if (!filter.IsValid(slot)) return REMOVE_SLOT;
            return UpdateSlot<AccessMode::NON_ATOMIC>(
                reinterpret_cast<Object**>(slot));
          },
          SlotSet::PREFREE_EMPTY_BUCKETS);
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        chunk_->invalidated_slots() != nullptr) {
#ifdef DEBUG
      for (auto object_size : *chunk_->invalidated_slots()) {
        HeapObject* object = object_size.first;
        int size = object_size.second;
        DCHECK_LE(object->SizeFromMap(object->map()), size);
      }
#endif
      // The invalidated slots are not needed after old-to-old slots were
      // processed.
      chunk_->ReleaseInvalidatedSlots();
    }
  }

  // Processes the chunk's typed slot sets (code targets, embedded pointers,
  // ...). Map space never has typed slots, hence the CHECKs.
  void UpdateTypedPointers() {
    Isolate* isolate = heap_->isolate();
    if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
        nullptr) {
      CHECK_NE(chunk_->owner(), heap_->map_space());
      RememberedSet<OLD_TO_NEW>::IterateTyped(
          chunk_,
          [isolate, this](SlotType slot_type, Address host_addr, Address slot) {
            return UpdateTypedSlotHelper::UpdateTypedSlot(
                isolate, slot_type, slot, [this](Object** slot) {
                  return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(
                      reinterpret_cast<Address>(slot));
                });
          });
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
         nullptr)) {
      CHECK_NE(chunk_->owner(), heap_->map_space());
      RememberedSet<OLD_TO_OLD>::IterateTyped(
          chunk_,
          [isolate](SlotType slot_type, Address host_addr, Address slot) {
            return UpdateTypedSlotHelper::UpdateTypedSlot(
                isolate, slot_type, slot, UpdateSlot<AccessMode::NON_ATOMIC>);
          });
    }
  }

  Heap* heap_;
  MarkingState* marking_state_;
  MemoryChunk* chunk_;
  RememberedSetUpdatingMode updating_mode_;
};
4011 :
// Factory for a to-space updating item bound to the minor collector's
// marking state. Caller (the updating job) takes ownership.
UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
    MemoryChunk* chunk, Address start, Address end) {
  return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
      chunk, start, end, non_atomic_marking_state());
}
4017 :
// Factory for a to-space updating item bound to the full collector's
// marking state. Caller (the updating job) takes ownership.
UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
    MemoryChunk* chunk, Address start, Address end) {
  return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
      chunk, start, end, non_atomic_marking_state());
}
4023 :
// Factory for a remembered-set updating item bound to the minor collector's
// marking state. Caller (the updating job) takes ownership.
UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
    MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
      heap(), non_atomic_marking_state(), chunk, updating_mode);
}
4029 :
4030 227666 : UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
4031 : MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
4032 : return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
4033 455332 : heap(), non_atomic_marking_state(), chunk, updating_mode);
4034 : }
4035 :
4036 : class GlobalHandlesUpdatingItem : public UpdatingItem {
4037 : public:
4038 : GlobalHandlesUpdatingItem(GlobalHandles* global_handles, size_t start,
4039 : size_t end)
4040 0 : : global_handles_(global_handles), start_(start), end_(end) {}
4041 0 : virtual ~GlobalHandlesUpdatingItem() {}
4042 :
4043 0 : void Process() override {
4044 0 : PointersUpdatingVisitor updating_visitor;
4045 0 : global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
4046 0 : }
4047 :
4048 : private:
4049 : GlobalHandles* global_handles_;
4050 : size_t start_;
4051 : size_t end_;
4052 : };
4053 :
4054 : // Update array buffers on a page that has been evacuated by copying objects.
4055 : // Target page exclusivity in old space is guaranteed by the fact that
4056 : // evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
4057 : // free list items of a given page. For new space the tracker will update
4058 : // using a lock.
4059 : class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
4060 : public:
4061 : enum EvacuationState { kRegular, kAborted };
4062 :
4063 : explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
4064 67253 : : page_(page), state_(state) {}
4065 134506 : virtual ~ArrayBufferTrackerUpdatingItem() {}
4066 :
4067 67235 : void Process() override {
4068 67235 : switch (state_) {
4069 : case EvacuationState::kRegular:
4070 : ArrayBufferTracker::ProcessBuffers(
4071 67238 : page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
4072 67222 : break;
4073 : case EvacuationState::kAborted:
4074 : ArrayBufferTracker::ProcessBuffers(
4075 0 : page_, ArrayBufferTracker::kUpdateForwardedKeepOthers);
4076 0 : break;
4077 : }
4078 67219 : }
4079 :
4080 : private:
4081 : Page* const page_;
4082 : const EvacuationState state_;
4083 : };
4084 :
4085 56800 : int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
4086 113600 : ItemParallelJob* job) {
4087 : // Seed to space pages.
4088 56800 : const Address space_start = heap()->new_space()->bottom();
4089 56800 : const Address space_end = heap()->new_space()->top();
4090 : int pages = 0;
4091 286820 : for (Page* page : PageRange(space_start, space_end)) {
4092 : Address start =
4093 59620 : page->Contains(space_start) ? space_start : page->area_start();
4094 58210 : Address end = page->Contains(space_end) ? space_end : page->area_end();
4095 58210 : job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
4096 58210 : pages++;
4097 : }
4098 56800 : if (pages == 0) return 0;
4099 56800 : return NumberOfParallelToSpacePointerUpdateTasks(pages);
4100 : }
4101 :
4102 : template <typename IterateableSpace>
4103 227200 : int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
4104 : ItemParallelJob* job, IterateableSpace* space,
4105 : RememberedSetUpdatingMode mode) {
4106 : int pages = 0;
4107 1757816 : for (MemoryChunk* chunk : *space) {
4108 : const bool contains_old_to_old_slots =
4109 : chunk->slot_set<OLD_TO_OLD>() != nullptr ||
4110 862480 : chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
4111 : const bool contains_old_to_new_slots =
4112 : chunk->slot_set<OLD_TO_NEW>() != nullptr ||
4113 646056 : chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
4114 : const bool contains_invalidated_slots =
4115 434472 : chunk->invalidated_slots() != nullptr;
4116 434472 : if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
4117 : !contains_invalidated_slots)
4118 : continue;
4119 227666 : if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
4120 : contains_invalidated_slots) {
4121 227666 : job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
4122 227666 : pages++;
4123 : }
4124 : }
4125 227200 : return pages;
4126 : }
4127 :
4128 0 : int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
4129 : ItemParallelJob* job) {
4130 : int pages = 0;
4131 0 : for (Page* p : new_space_evacuation_pages_) {
4132 0 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
4133 0 : if (p->local_tracker() == nullptr) continue;
4134 :
4135 0 : pages++;
4136 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
4137 0 : p, ArrayBufferTrackerUpdatingItem::kRegular));
4138 : }
4139 : }
4140 0 : return pages;
4141 : }
4142 :
4143 56800 : int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
4144 : ItemParallelJob* job) {
4145 : int pages = 0;
4146 182558 : for (Page* p : new_space_evacuation_pages_) {
4147 68958 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
4148 67386 : if (p->local_tracker() == nullptr) continue;
4149 :
4150 66984 : pages++;
4151 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
4152 66984 : p, ArrayBufferTrackerUpdatingItem::kRegular));
4153 : }
4154 : }
4155 56800 : return pages;
4156 : }
4157 :
4158 56800 : int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
4159 : ItemParallelJob* job) {
4160 : int pages = 0;
4161 117348 : for (Page* p : old_space_evacuation_pages_) {
4162 7496 : if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
4163 3720 : p->IsEvacuationCandidate()) {
4164 3720 : if (p->local_tracker() == nullptr) continue;
4165 :
4166 269 : pages++;
4167 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
4168 269 : p, ArrayBufferTrackerUpdatingItem::kRegular));
4169 : }
4170 : }
4171 113628 : for (auto object_and_page : aborted_evacuation_candidates_) {
4172 : Page* p = object_and_page.second;
4173 28 : if (p->local_tracker() == nullptr) continue;
4174 :
4175 0 : pages++;
4176 : job->AddItem(new ArrayBufferTrackerUpdatingItem(
4177 0 : p, ArrayBufferTrackerUpdatingItem::kAborted));
4178 : }
4179 56800 : return pages;
4180 : }
4181 :
// After evacuation, rewrites every pointer that may still refer to an
// object's old location: roots, remembered-set slots, map space, array
// buffer trackers, and weak references. Each phase runs under its own
// GC tracer scope.
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor;

  {
    // Phase 1: strong roots.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  }

  {
    // Phase 2: remembered-set slots in old/code/large-object space plus
    // to-space pages, processed as a parallel job.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
    ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);

    int remembered_set_pages = 0;
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
    const int remembered_set_tasks =
        remembered_set_pages == 0
            ? 0
            : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
                                                 old_to_new_slots_);
    const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
    // Task count covers whichever item kind needs more workers.
    const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
    for (int i = 0; i < num_tasks; i++) {
      updating_job.AddTask(new PointersUpdatingTask(isolate()));
    }
    updating_job.Run();
  }

  {
    // - Update pointers in map space in a separate phase to avoid data races
    //   with Map->LayoutDescriptor edge.
    // - Update array buffer trackers in the second phase to have access to
    //   byte length which is potentially a HeapNumber.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
    ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);

    int array_buffer_pages = 0;
    array_buffer_pages += CollectNewSpaceArrayBufferTrackerItems(&updating_job);
    array_buffer_pages += CollectOldSpaceArrayBufferTrackerItems(&updating_job);

    int remembered_set_pages = 0;
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
    const int remembered_set_tasks =
        remembered_set_pages == 0
            ? 0
            : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
                                                 old_to_new_slots_);
    const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
    if (num_tasks > 0) {
      for (int i = 0; i < num_tasks; i++) {
        updating_job.AddTask(new PointersUpdatingTask(isolate()));
      }
      updating_job.Run();
    }
  }

  {
    // Phase 4: weak references on the main thread.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
    // Update pointers from external string table.
    heap_->UpdateReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
  }
}
4261 :
// Minor (young-generation) variant of pointer updating after evacuation.
// Only old-to-new slots need rewriting; the job is seeded before the root
// phase so tasks can start as soon as Run() is called.
void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(),
           GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor;
  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                               &page_parallel_job_semaphore_);

  CollectNewSpaceArrayBufferTrackerItems(&updating_job);
  // Create batches of global handles.
  SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
                                               &updating_job);
  const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
  // Only OLD_TO_NEW slots matter for a young-generation collection.
  int remembered_set_pages = 0;
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->old_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->code_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->map_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->lo_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  const int remembered_set_tasks = NumberOfParallelPointerUpdateTasks(
      remembered_set_pages, old_to_new_slots_);
  const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
  for (int i = 0; i < num_tasks; i++) {
    updating_job.AddTask(new PointersUpdatingTask(isolate()));
  }

  {
    // Roots are updated on the main thread before the parallel slot phase.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
  }
  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
    updating_job.Run();
  }

  {
    // Weak references are processed last, on the main thread.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);

    // Update pointers from external string table.
    heap()->UpdateNewSpaceReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);
    heap()->IterateEncounteredWeakCollections(&updating_visitor);
  }
}
4319 :
4320 28 : void MarkCompactCollector::ReportAbortedEvacuationCandidate(
4321 : HeapObject* failed_object, Page* page) {
4322 28 : base::LockGuard<base::Mutex> guard(&mutex_);
4323 :
4324 : page->SetFlag(Page::COMPACTION_WAS_ABORTED);
4325 56 : aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
4326 28 : }
4327 :
// Repairs pages whose evacuation was aborted mid-way: stale remembered-set
// entries up to the failed object are dropped, live bytes are recomputed,
// and slots for the surviving objects are re-recorded. Successfully
// evacuated candidate pages are unlinked from their space.
void MarkCompactCollector::PostProcessEvacuationCandidates() {
  for (auto object_and_page : aborted_evacuation_candidates_) {
    HeapObject* failed_object = object_and_page.first;
    Page* page = object_and_page.second;
    DCHECK(page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
    // Aborted compaction page. We have to record slots here, since we
    // might not have recorded them in first place.

    // Remove outdated slots. Objects before |failed_object| were evacuated,
    // so slots pointing into [page start, failed_object) are stale.
    RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
                                           failed_object->address(),
                                           SlotSet::PREFREE_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
                                                failed_object->address());
    // Recompute live bytes.
    LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
    // Re-record slots.
    EvacuateRecordOnlyVisitor record_visitor(heap());
    LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
                                               &record_visitor,
                                               LiveObjectVisitor::kKeepMarking);
    // Array buffers will be processed during pointer updating.
  }
  const int aborted_pages =
      static_cast<int>(aborted_evacuation_candidates_.size());
  int aborted_pages_verified = 0;
  for (Page* p : old_space_evacuation_pages_) {
    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
      // After clearing the evacuation candidate flag the page is again in a
      // regular state.
      p->ClearEvacuationCandidate();
      aborted_pages_verified++;
    } else {
      DCHECK(p->IsEvacuationCandidate());
      DCHECK(p->SweepingDone());
      p->Unlink();
    }
  }
  // Every aborted candidate must have been found among the evacuation pages.
  DCHECK_EQ(aborted_pages_verified, aborted_pages);
  if (FLAG_trace_evacuation && (aborted_pages > 0)) {
    PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
                 isolate()->time_millis_since_init(), aborted_pages);
  }
}
4372 :
4373 56800 : void MarkCompactCollector::ReleaseEvacuationCandidates() {
4374 117348 : for (Page* p : old_space_evacuation_pages_) {
4375 3748 : if (!p->IsEvacuationCandidate()) continue;
4376 : PagedSpace* space = static_cast<PagedSpace*>(p->owner());
4377 : non_atomic_marking_state()->SetLiveBytes(p, 0);
4378 3720 : CHECK(p->SweepingDone());
4379 3720 : space->ReleasePage(p);
4380 : }
4381 : old_space_evacuation_pages_.clear();
4382 56800 : compacting_ = false;
4383 56800 : }
4384 :
4385 655622 : void MarkCompactCollector::Sweeper::SweepSpaceFromTask(
4386 : AllocationSpace identity) {
4387 : Page* page = nullptr;
4388 1674860 : while (!stop_sweeper_tasks_.Value() &&
4389 : ((page = GetSweepingPageSafe(identity)) != nullptr)) {
4390 363468 : ParallelSweepPage(page, identity);
4391 : }
4392 656019 : }
4393 :
4394 261596 : int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
4395 : int required_freed_bytes,
4396 : int max_pages) {
4397 : int max_freed = 0;
4398 : int pages_freed = 0;
4399 : Page* page = nullptr;
4400 550704 : while ((page = GetSweepingPageSafe(identity)) != nullptr) {
4401 60754 : int freed = ParallelSweepPage(page, identity);
4402 60755 : pages_freed += 1;
4403 : DCHECK_GE(freed, 0);
4404 : max_freed = Max(max_freed, freed);
4405 60755 : if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
4406 : return max_freed;
4407 28754 : if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
4408 : }
4409 : return max_freed;
4410 : }
4411 :
// Sweeps a single page, racing safely with other sweeper tasks via the
// page mutex (double-checked SweepingDone() pattern). Returns the largest
// free block produced, or 0 if the page was already swept.
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
                                                     AllocationSpace identity) {
  // Early bailout for pages that are swept outside of the regular sweeping
  // path. This check here avoids taking the lock first, avoiding deadlocks.
  if (page->SweepingDone()) return 0;

  int max_freed = 0;
  {
    base::LockGuard<base::RecursiveMutex> guard(page->mutex());
    // If this page was already swept in the meantime, we can return here.
    if (page->SweepingDone()) return 0;

    // If the page is a code page, the CodePageMemoryModificationScope changes
    // the page protection mode from read+execute to read+write while sweeping.
    CodePageMemoryModificationScope code_page_scope(page);

    DCHECK_EQ(Page::kSweepingPending,
              page->concurrent_sweeping_state().Value());
    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    const FreeSpaceTreatmentMode free_space_mode =
        Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    // New space has no free list; only paged spaces rebuild one.
    if (identity == NEW_SPACE) {
      RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
    } else {
      max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
    }
    DCHECK(page->SweepingDone());

    // After finishing sweeping of a page we clean up its remembered set.
    TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
    if (typed_slot_set) {
      typed_slot_set->FreeToBeFreedChunks();
    }
    SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
    if (slot_set) {
      slot_set->FreeToBeFreedBuckets();
    }
  }

  {
    // Publish the swept page under the sweeper's own mutex.
    base::LockGuard<base::Mutex> guard(&mutex_);
    swept_list_[identity].push_back(page);
  }
  return max_freed;
}
4457 :
4458 0 : void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
4459 : DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
4460 424233 : PrepareToBeSweptPage(space, page);
4461 424233 : sweeping_list_[space].push_back(page);
4462 0 : }
4463 :
4464 424233 : void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
4465 423238 : Page* page) {
4466 : page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
4467 : DCHECK_GE(page->area_size(),
4468 : static_cast<size_t>(marking_state_->live_bytes(page)));
4469 424233 : if (space != NEW_SPACE) {
4470 : heap_->paged_space(space)->IncreaseAllocatedBytes(
4471 423238 : marking_state_->live_bytes(page), page);
4472 : }
4473 424233 : }
4474 :
4475 1306050 : Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
4476 : AllocationSpace space) {
4477 1306050 : base::LockGuard<base::Mutex> guard(&mutex_);
4478 : Page* page = nullptr;
4479 2613132 : if (!sweeping_list_[space].empty()) {
4480 424233 : page = sweeping_list_[space].front();
4481 424233 : sweeping_list_[space].pop_front();
4482 : }
4483 1306486 : return page;
4484 : }
4485 :
// Prepares all pages of |space| for sweeping: candidate pages are left for
// evacuation, NEVER_ALLOCATE pages are swept eagerly to stay iterable, at
// most one completely dead page is kept (the rest are released), and all
// remaining pages are queued on the sweeper.
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
  space->ClearStats();

  int will_be_swept = 0;
  bool unused_page_present = false;

  // Loop needs to support deletion if live bytes == 0 for a page.
  for (auto it = space->begin(); it != space->end();) {
    // Advance before any possible ReleasePage below invalidates |p|.
    Page* p = *(it++);
    DCHECK(p->SweepingDone());

    if (p->IsEvacuationCandidate()) {
      // Will be processed in Evacuate.
      DCHECK(!evacuation_candidates_.empty());
      continue;
    }

    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // We need to sweep the page to get it into an iterable state again. Note
      // that this adds unusable memory into the free list that is later on
      // (in the free list) dropped again. Since we only use the flag for
      // testing this is fine.
      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      sweeper().RawSweep(p, Sweeper::IGNORE_FREE_LIST,
                         Heap::ShouldZapGarbage()
                             ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
                             : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
      space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
      continue;
    }

    // One unused page is kept, all further are released before sweeping them.
    if (non_atomic_marking_state()->live_bytes(p) == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintIsolate(isolate(), "sweeping: released page: %p",
                       static_cast<void*>(p));
        }
        ArrayBufferTracker::FreeAll(p);
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    sweeper().AddPage(space->identity(), p);
    will_be_swept++;
  }

  if (FLAG_gc_verbose) {
    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
                 AllocationSpaceName(space->identity()), will_be_swept);
  }
}
4540 :
// Kicks off sweeping for all paged spaces. Each space's preparation is
// timed under its own GCTracer sub-scope; the sweeper tasks themselves are
// started at the end via StartSweeping().
void MarkCompactCollector::StartSweepSpaces() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif

  {
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_OLD);
      StartSweepSpace(heap()->old_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_CODE);
      StartSweepSpace(heap()->code_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_MAP);
      StartSweepSpace(heap()->map_space());
    }
    sweeper().StartSweeping();
  }
}
4566 :
4567 : } // namespace internal
4568 : } // namespace v8
|