// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/scavenger.h"

#include "src/heap/array-buffer-collector.h"
#include "src/heap/barrier.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/utils-inl.h"

namespace v8 {
namespace internal {

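// A parallel-job work item wrapping one memory chunk; processing it lets a
// Scavenger visit all old-to-new slots recorded on that chunk.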
class PageScavengingItem final : public ItemParallelJob::Item {
 public:
  explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
  ~PageScavengingItem() override = default;

  void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }

 private:
  MemoryChunk* const chunk_;
};

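// Per-thread task: first drains the page items above, then processes the
// local copied/promoted work lists until the shared barrier reports that all
// tasks are done.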
class ScavengingTask final : public ItemParallelJob::Task {
 public:
  ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
      : ItemParallelJob::Task(heap->isolate()),
        heap_(heap),
        scavenger_(scavenger),
        barrier_(barrier) {}

  void RunInParallel() final {
    TRACE_BACKGROUND_GC(
        heap_->tracer(),
        GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
    double scavenging_time = 0.0;
    {
      barrier_->Start();
      TimedScope scope(&scavenging_time);
      PageScavengingItem* item = nullptr;
      while ((item = GetItem<PageScavengingItem>()) != nullptr) {
        item->Process(scavenger_);
        item->MarkFinished();
      }
      do {
        scavenger_->Process(barrier_);
      } while (!barrier_->Wait());
      scavenger_->Process();
    }
    if (FLAG_trace_parallel_scavenge) {
      PrintIsolate(heap_->isolate(),
                   "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
                   static_cast<void*>(this), scavenging_time,
                   scavenger_->bytes_copied(), scavenger_->bytes_promoted());
    }
  }

 private:
  Heap* const heap_;
  Scavenger* const scavenger_;
  OneshotBarrier* const barrier_;
};

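// Visits the body of an object that was just promoted to the old generation,
// scavenging any references that still point into from-space and, when
// compacting, recording slots that point at evacuation candidates.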
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
 public:
  IterateAndScavengePromotedObjectsVisitor(Heap* heap, Scavenger* scavenger,
                                           bool record_slots)
      : heap_(heap), scavenger_(scavenger), record_slots_(record_slots) {}

  V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
                               MaybeObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    HandleSlot(host, FullHeapObjectSlot(&target), target);
  }
  V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    HeapObject heap_object = rinfo->target_object();
    HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
  }

 private:
  template <typename TSlot>
  V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    // Treat weak references as strong.
    // TODO(marja): Proper weakness handling in the young generation.
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = *slot;
      HeapObject heap_object;
      if (object.GetHeapObject(&heap_object)) {
        HandleSlot(host, THeapObjectSlot(slot), heap_object);
      }
    }
  }

  template <typename THeapObjectSlot>
  V8_INLINE void HandleSlot(HeapObject host, THeapObjectSlot slot,
                            HeapObject target) {
    static_assert(
        std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
            std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
        "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
    scavenger_->PageMemoryFence(MaybeObject::FromObject(target));

    if (Heap::InFromSpace(target)) {
      SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
      bool success = (*slot)->GetHeapObject(&target);
      USE(success);
      DCHECK(success);

      if (result == KEEP_SLOT) {
        SLOW_DCHECK(target->IsHeapObject());
        RememberedSet<OLD_TO_NEW>::Insert(MemoryChunk::FromHeapObject(host),
                                          slot.address());
      }
      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
          HeapObject::cast(target)));
    } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
                                    HeapObject::cast(target))) {
      heap_->mark_compact_collector()->RecordSlot(host, ObjectSlot(slot),
                                                  target);
    }
  }

  Heap* const heap_;
  Scavenger* const scavenger_;
  const bool record_slots_;
};

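// An object is unscavenged if it still lives in from-space and no forwarding
// address has been installed in its map word, i.e. it did not survive.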
static bool IsUnscavengedHeapObject(Heap* heap, FullObjectSlot p) {
  return Heap::InFromSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}

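// Weak-list retainer: objects outside from-space are kept as-is, scavenged
// objects are replaced by their forwarding address, and anything else is
// dropped.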
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  Object RetainAs(Object object) override {
    if (!Heap::InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return Object();
  }
};

ScavengerCollector::ScavengerCollector(Heap* heap)
    : isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}

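// Entry point for a young-generation collection: sets up one Scavenger per
// task, scavenges roots and remembered sets in parallel, processes weak
// global handles, and finally updates external references and frees the
// remaining dead objects in the new large-object space.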
void ScavengerCollector::CollectGarbage() {
  DCHECK(surviving_new_large_objects_.empty());
  ItemParallelJob job(isolate_->cancelable_task_manager(),
                      &parallel_scavenge_semaphore_);
  const int kMainThreadId = 0;
  Scavenger* scavengers[kMaxScavengerTasks];
  const bool is_logging = isolate_->LogObjectRelocation();
  const int num_scavenge_tasks = NumberOfScavengeTasks();
  OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
  Scavenger::CopiedList copied_list(num_scavenge_tasks);
  Scavenger::PromotionList promotion_list(num_scavenge_tasks);
  for (int i = 0; i < num_scavenge_tasks; i++) {
    scavengers[i] = new Scavenger(this, heap_, is_logging, &copied_list,
                                  &promotion_list, i);
    job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
  }

  {
    Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
    // Pause the concurrent sweeper.
    Sweeper::PauseOrCompleteScope pause_scope(sweeper);
    // Filter out pages from the sweeper that need to be processed for
    // old-to-new slots by the Scavenger. After processing, the Scavenger adds
    // back pages that are still unswept. This way the Scavenger has exclusive
    // access to the slots of a page and can completely avoid any locks on
    // the page itself.
    Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
    filter_scope.FilterOldSpaceSweepingPages(
        [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [&job](MemoryChunk* chunk) {
          job.AddItem(new PageScavengingItem(chunk));
        });

    RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);

    {
      // Identify weak unmodified handles. Requires an unmodified graph.
      TRACE_GC(
          heap_->tracer(),
          GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
      isolate_->global_handles()->IdentifyWeakUnmodifiedObjects(
          &JSObject::IsUnmodifiedApiObject);
    }
    {
      // Copy roots.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
      heap_->IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
    }
    {
      // Parallel phase scavenging all copied and promoted objects.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      job.Run(isolate_->async_counters());
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }
    {
      // Scavenge weak global handles.
      TRACE_GC(heap_->tracer(),
               GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
      isolate_->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
          &IsUnscavengedHeapObject);
      isolate_->global_handles()
          ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
              &root_scavenge_visitor);
      scavengers[kMainThreadId]->Process();

      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
      isolate_->global_handles()
          ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
              &root_scavenge_visitor, &IsUnscavengedHeapObject);
    }

    {
      // Finalize parallel scavenging.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);

      for (int i = 0; i < num_scavenge_tasks; i++) {
        scavengers[i]->Finalize();
        delete scavengers[i];
      }

      HandleSurvivingNewLargeObjects();
    }
  }

  {
    // Update references into new space.
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
    heap_->UpdateNewSpaceReferencesInExternalStringTable(
        &Heap::UpdateNewSpaceReferenceInExternalStringTableEntry);

    heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
  }

  if (FLAG_concurrent_marking) {
    // Ensure that concurrent marker does not track pages that are
    // going to be unmapped.
    for (Page* p :
         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
      heap_->concurrent_marking()->ClearMemoryChunkData(p);
    }
  }

  ScavengeWeakObjectRetainer weak_object_retainer;
  heap_->ProcessYoungWeakReferences(&weak_object_retainer);

  // Set age mark.
  heap_->new_space_->set_age_mark(heap_->new_space()->top());

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
    ArrayBufferTracker::PrepareToFreeDeadInNewSpace(heap_);
  }
  heap_->array_buffer_collector()->FreeAllocations();

  // Since we promote all surviving large objects immediately, all remaining
  // large objects must be dead.
  // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
  heap_->new_lo_space()->FreeAllObjects();

  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
    if (chunk->SweepingDone()) {
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
    } else {
      RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
    }
  });

  // Update how much has survived scavenge.
  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedNewSpaceObjectSize());
}

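// Surviving large objects are promoted by moving their pages into the old
// large-object space; the map is re-installed first so that object size
// queries work during page promotion.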
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
  for (SurvivingNewLargeObjectMapEntry update_info :
       surviving_new_large_objects_) {
    HeapObject object = update_info.first;
    Map map = update_info.second;
    // Order is important here. We have to re-install the map to have access
    // to meta-data like size during page promotion.
    object->set_map_word(MapWord::FromMap(map));
    LargePage* page = LargePage::FromHeapObject(object);
    heap_->lo_space()->PromoteNewLargeObject(page);
  }
  surviving_new_large_objects_.clear();
}

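// Called by each Scavenger during finalization to publish its local set of
// surviving large objects into the collector-wide map.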
void ScavengerCollector::MergeSurvivingNewLargeObjects(
    const SurvivingNewLargeObjectsMap& objects) {
  for (SurvivingNewLargeObjectMapEntry object : objects) {
    bool success = surviving_new_large_objects_.insert(object).second;
    USE(success);
    DCHECK(success);
  }
}

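// Heuristic: roughly one task per MB of new-space capacity, capped both by
// kMaxScavengerTasks and by the available cores. For example, a 16 MB new
// space with 8 worker threads yields Min(Min(16, kMaxScavengerTasks), 9)
// tasks; near the old-generation limit we fall back to a single task.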
int ScavengerCollector::NumberOfScavengeTasks() {
  if (!FLAG_parallel_scavenge) return 1;
  const int num_scavenge_tasks =
      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB;
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  int tasks =
      Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
  if (!heap_->CanExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}

Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                     CopiedList* copied_list, PromotionList* promotion_list,
                     int task_id)
    : collector_(collector),
      heap_(heap),
      promotion_list_(promotion_list, task_id),
      copied_list_(copied_list, task_id),
      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
      copied_size_(0),
      promoted_size_(0),
      allocator_(heap),
      is_logging_(is_logging),
      is_incremental_marking_(heap->incremental_marking()->IsMarking()),
      is_compacting_(heap->incremental_marking()->IsCompacting()) {}

void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
                                                 int size) {
  // We are not collecting slots on new-space objects during mutation, so we
  // have to scan for pointers to evacuation candidates when we promote
  // objects. But we should not record any slots in non-black objects: a grey
  // object's slots would be rescanned anyway, and a white object might not
  // survive until the end of the collection, so recording its slots would
  // violate the invariant.
  const bool record_slots =
      is_compacting_ &&
      heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
  IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
  target->IterateBodyFast(map, size, &visitor);
}

void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
  AllocationSpace space = page->owner()->identity();
  if ((space == OLD_SPACE) && !page->SweepingDone()) {
    heap()->mark_compact_collector()->sweeper()->AddPage(
        space, reinterpret_cast<Page*>(page),
        Sweeper::READD_TEMPORARY_REMOVED_PAGE);
  }
}

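// Scavenges one page by visiting both the untyped and the typed old-to-new
// remembered-set entries recorded for it.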
void Scavenger::ScavengePage(MemoryChunk* page) {
  CodePageMemoryModificationScope memory_modification_scope(page);
  RememberedSet<OLD_TO_NEW>::Iterate(page,
                                     [this](MaybeObjectSlot addr) {
                                       return CheckAndScavengeObject(heap_,
                                                                     addr);
                                     },
                                     SlotSet::KEEP_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_NEW>::IterateTyped(
      page, [=](SlotType type, Address addr) {
        return UpdateTypedSlotHelper::UpdateTypedSlot(
            heap_, type, addr, [this](FullMaybeObjectSlot slot) {
              return CheckAndScavengeObject(heap(), slot);
            });
      });

  AddPageToSweeperIfNecessary(page);
}

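// Drains the local work lists: alternates between visiting freshly copied
// objects and iterating promoted ones until both lists are empty, waking
// other tasks through the barrier whenever global work remains.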
void Scavenger::Process(OneshotBarrier* barrier) {
  ScavengeVisitor scavenge_visitor(this);

  const bool have_barrier = barrier != nullptr;
  bool done;
  size_t objects = 0;
  do {
    done = true;
    ObjectAndSize object_and_size;
    while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
           copied_list_.Pop(&object_and_size)) {
      scavenge_visitor.Visit(object_and_size.first);
      done = false;
      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
        if (!copied_list_.IsGlobalPoolEmpty()) {
          barrier->NotifyAll();
        }
      }
    }

    struct PromotionListEntry entry;
    while (promotion_list_.Pop(&entry)) {
      HeapObject target = entry.heap_object;
      DCHECK(!target->IsMap());
      IterateAndScavengePromotedObject(target, entry.map, entry.size);
      done = false;
      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
        if (!promotion_list_.IsGlobalPoolEmpty()) {
          barrier->NotifyAll();
        }
      }
    }
  } while (!done);
}

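// Publishes this Scavenger's thread-local state (pretenuring feedback, size
// counters, surviving large objects) back to the heap and the collector.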
void Scavenger::Finalize() {
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
  heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
  heap()->IncrementPromotedObjectsSize(promoted_size_);
  collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
  allocator_.Finalize();
}

void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
                                           FullObjectSlot p) {
  DCHECK(!HasWeakHeapObjectTag(*p));
  ScavengePointer(p);
}

void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
                                            FullObjectSlot start,
                                            FullObjectSlot end) {
  // Copy all HeapObject pointers in [start, end).
  for (FullObjectSlot p = start; p < end; ++p) ScavengePointer(p);
}

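// Scavenges the object a root slot points to, but only if it still lives in
// new space; pointers into the old generation need no work here.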
void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
  Object object = *p;
  DCHECK(!HasWeakHeapObjectTag(object));
  if (!Heap::InNewSpace(object)) return;

  scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
}

RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

}  // namespace internal
}  // namespace v8