Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/sweeper.h"
6 :
7 : #include "src/base/template-utils.h"
8 : #include "src/heap/array-buffer-tracker-inl.h"
9 : #include "src/heap/gc-tracer.h"
10 : #include "src/heap/mark-compact-inl.h"
11 : #include "src/heap/remembered-set.h"
12 : #include "src/objects-inl.h"
13 : #include "src/vm-state-inl.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 :
// Constructs the sweeper in an idle state: no tasks scheduled, both
// semaphores start at 0 (so a Wait() blocks until a task Signal()s), and
// neither sweeping nor iterability processing is in progress.
18 62882 : Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
19 : : heap_(heap),
20 : marking_state_(marking_state),
21 : num_tasks_(0),
22 : pending_sweeper_tasks_semaphore_(0),
23 : incremental_sweeper_pending_(false),
24 : sweeping_in_progress_(false),
25 : num_sweeping_tasks_(0),
26 : stop_sweeper_tasks_(false),
27 : iterability_task_semaphore_(0),
28 : iterability_in_progress_(false),
29 : iterability_task_started_(false),
30 503058 : should_reduce_memory_(false) {}
31 :
// RAII scope that pauses concurrent sweeper tasks for its lifetime.
// Setting stop_sweeper_tasks_ *before* aborting makes any still-running
// task exit its page loop (see SweepSpaceFromTask); the scope then either
// completes sweeping outright (if nothing is left) or leaves sweeping
// enabled but task-less until the destructor restarts the tasks.
32 23594 : Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
33 23594 : : sweeper_(sweeper) {
34 23594 : sweeper_->stop_sweeper_tasks_ = true;
35 70782 : if (!sweeper_->sweeping_in_progress()) return;
36 :
37 1903 : sweeper_->AbortAndWaitForTasks();
38 :
39 : // Complete sweeping if there's nothing more to do.
40 3806 : if (sweeper_->IsDoneSweeping()) {
41 58 : sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
42 : DCHECK(!sweeper_->sweeping_in_progress());
43 : } else {
44 : // Unless sweeping is complete the flag still indicates that the sweeper
45 : // is enabled. It just cannot use tasks anymore.
46 : DCHECK(sweeper_->sweeping_in_progress());
47 : }
48 : }
49 :
// Re-enables sweeper tasks; restarts them only if the constructor left
// sweeping in progress (i.e. there were still pages to sweep).
50 23594 : Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
51 47188 : sweeper_->stop_sweeper_tasks_ = false;
52 47188 : if (!sweeper_->sweeping_in_progress()) return;
53 :
54 1845 : sweeper_->StartSweeperTasks();
55 23594 : }
56 :
57 23594 : Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
58 : Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
59 : : sweeper_(sweeper),
60 : pause_or_complete_scope_(pause_or_complete_scope),
61 47188 : sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
62 : USE(pause_or_complete_scope_);
63 47188 : if (!sweeping_in_progress_) return;
64 :
65 : int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
66 1845 : old_space_sweeping_list_ =
67 1845 : std::move(sweeper_->sweeping_list_[old_space_index]);
68 1845 : sweeper_->sweeping_list_[old_space_index].clear();
69 : }
70 :
// Hands the (possibly filtered) OLD_SPACE sweeping list back to the
// sweeper. The in-progress flag must not have changed while detached.
71 23594 : Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
72 : DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
73 23594 : if (!sweeping_in_progress_) return;
74 :
75 1845 : sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
76 1845 : std::move(old_space_sweeping_list_);
77 : // old_space_sweeping_list_ does not need to be cleared as we don't use it.
78 23594 : }
79 :
// Background task that concurrently sweeps all sweepable spaces.
// Each task is given a starting space; the round-robin offset below
// spreads tasks across spaces so they don't all contend on the same list.
// On completion it decrements the shared task counter and signals the
// semaphore the main thread waits on in AbortAndWaitForTasks().
80 : class Sweeper::SweeperTask final : public CancelableTask {
81 : public:
82 : SweeperTask(Isolate* isolate, Sweeper* sweeper,
83 : base::Semaphore* pending_sweeper_tasks,
84 : std::atomic<intptr_t>* num_sweeping_tasks,
85 : AllocationSpace space_to_start)
86 : : CancelableTask(isolate),
87 : sweeper_(sweeper),
88 : pending_sweeper_tasks_(pending_sweeper_tasks),
89 : num_sweeping_tasks_(num_sweeping_tasks),
90 : space_to_start_(space_to_start),
91 253494 : tracer_(isolate->heap()->tracer()) {}
92 :
93 506905 : ~SweeperTask() override = default;
94 :
95 : private:
96 236126 : void RunInternal() final {
97 944117 : TRACE_BACKGROUND_GC(tracer_,
98 : GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
99 : DCHECK(IsValidSweepingSpace(space_to_start_));
// Visit all sweeping spaces once, starting at space_to_start_ and
// wrapping around modulo kNumberOfSweepingSpaces.
100 235796 : const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
101 945140 : for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
102 : const AllocationSpace space_id = static_cast<AllocationSpace>(
103 708680 : FIRST_GROWABLE_PAGED_SPACE +
104 1417360 : ((i + offset) % kNumberOfSweepingSpaces));
105 : // Do not sweep code space concurrently.
106 708680 : if (space_id == CODE_SPACE) continue;
107 : DCHECK(IsValidSweepingSpace(space_id));
108 472601 : sweeper_->SweepSpaceFromTask(space_id);
109 : }
// Decrement before Signal(): the waiter relies on the counter being
// updated by the time the semaphore wakes it.
110 236460 : (*num_sweeping_tasks_)--;
111 472908 : pending_sweeper_tasks_->Signal();
112 236461 : }
113 :
114 : Sweeper* const sweeper_;
115 : base::Semaphore* const pending_sweeper_tasks_;
116 : std::atomic<intptr_t>* const num_sweeping_tasks_;
117 : AllocationSpace space_to_start_;
118 : GCTracer* const tracer_;
119 :
120 : DISALLOW_COPY_AND_ASSIGN(SweeperTask);
121 : };
122 :
// Foreground (main-thread) task that sweeps CODE_SPACE one page at a
// time. Code space is never swept concurrently (see SweeperTask), so this
// task does that work incrementally, re-posting itself until the code
// space sweeping list is empty.
123 : class Sweeper::IncrementalSweeperTask final : public CancelableTask {
124 : public:
125 : IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
126 31229 : : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}
127 :
128 62458 : ~IncrementalSweeperTask() override = default;
129 :
130 : private:
131 30200 : void RunInternal() final {
132 30200 : VMState<GC> state(isolate_);
133 90600 : TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
134 :
// Clear the pending flag before possibly rescheduling, so
// ScheduleIncrementalSweepingTask() can post a fresh task.
135 60400 : sweeper_->incremental_sweeper_pending_ = false;
136 :
137 60400 : if (sweeper_->sweeping_in_progress()) {
// Returns true when the code-space list is empty; otherwise keep going.
138 24998 : if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
139 5147 : sweeper_->ScheduleIncrementalSweepingTask();
140 : }
141 : }
142 30200 : }
143 :
144 : Isolate* const isolate_;
145 : Sweeper* const sweeper_;
146 : DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
147 : };
148 :
// Marks sweeping as started and orders each space's sweeping list by
// ascending live bytes, so pages with the least live data (most
// reclaimable memory) are swept first. Does not spawn tasks; see
// StartSweeperTasks().
149 83492 : void Sweeper::StartSweeping() {
150 83492 : CHECK(!stop_sweeper_tasks_);
151 83492 : sweeping_in_progress_ = true;
152 83492 : iterability_in_progress_ = true;
153 166984 : should_reduce_memory_ = heap_->ShouldReduceMemory();
154 : MajorNonAtomicMarkingState* marking_state =
155 83492 : heap_->mark_compact_collector()->non_atomic_marking_state();
156 250476 : ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
157 : int space_index = GetSweepSpaceIndex(space);
158 : std::sort(sweeping_list_[space_index].begin(),
159 250476 : sweeping_list_[space_index].end(),
160 493280 : [marking_state](Page* a, Page* b) {
161 : return marking_state->live_bytes(a) <
162 : marking_state->live_bytes(b);
163 994232 : });
164 333968 : });
165 83492 : }
166 :
// Spawns one background SweeperTask per sweepable space (when concurrent
// sweeping is enabled and not suppressed for testing), recording task ids
// so AbortAndWaitForTasks() can cancel them, then schedules the
// incremental code-space sweeper on the main thread.
167 85342 : void Sweeper::StartSweeperTasks() {
168 : DCHECK_EQ(0, num_tasks_);
169 : DCHECK_EQ(0, num_sweeping_tasks_);
170 169860 : if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
171 84518 : !heap_->delay_sweeper_tasks_for_testing_) {
172 253494 : ForAllSweepingSpaces([this](AllocationSpace space) {
173 : DCHECK(IsValidSweepingSpace(space));
// Increment before posting: the task decrements on completion.
174 253494 : num_sweeping_tasks_++;
175 : auto task = base::make_unique<SweeperTask>(
176 253494 : heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
177 506988 : &num_sweeping_tasks_, space);
178 : DCHECK_LT(num_tasks_, kMaxSweeperTasks);
179 506988 : task_ids_[num_tasks_++] = task->id();
180 760482 : V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
181 337992 : });
182 84498 : ScheduleIncrementalSweepingTask();
183 : }
184 85342 : }
185 :
186 566074 : void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
187 566074 : if (!page->SweepingDone()) {
188 4036 : ParallelSweepPage(page, page->owner()->identity());
189 4036 : if (!page->SweepingDone()) {
190 : // We were not able to sweep that page, i.e., a concurrent
191 : // sweeper thread currently owns this page. Wait for the sweeper
192 : // thread to be done with this page.
193 : page->WaitUntilSweepingCompleted();
194 : }
195 : }
196 566074 : }
197 :
198 919111 : Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
199 919111 : base::MutexGuard guard(&mutex_);
200 1838594 : SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
201 919297 : if (!list.empty()) {
202 524571 : auto last_page = list.back();
203 : list.pop_back();
204 524571 : return last_page;
205 : }
206 : return nullptr;
207 : }
208 :
// Cancels all outstanding sweeper tasks and waits for any that already
// started. For each task: if TryAbort fails the task is (or was) running
// and will Signal() the semaphore, so we Wait(); if it succeeds the task
// never ran, so we decrement the counter ourselves to keep it balanced.
209 85395 : void Sweeper::AbortAndWaitForTasks() {
210 170790 : if (!FLAG_concurrent_sweeping) return;
211 :
212 253494 : for (int i = 0; i < num_tasks_; i++) {
213 506988 : if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
214 : TryAbortResult::kTaskAborted) {
215 236466 : pending_sweeper_tasks_semaphore_.Wait();
216 : } else {
217 : // Aborted case.
218 : num_sweeping_tasks_--;
219 : }
220 : }
221 84828 : num_tasks_ = 0;
222 : DCHECK_EQ(0, num_sweeping_tasks_);
223 : }
224 :
// Finishes all sweeping synchronously on this thread: completes the
// iterability work, drains every space's sweeping list, stops background
// tasks, verifies all lists are empty, and clears the in-progress flag.
225 83492 : void Sweeper::EnsureCompleted() {
226 166984 : if (!sweeping_in_progress_) return;
227 :
228 83492 : EnsureIterabilityCompleted();
229 :
230 : // If sweeping is not completed or not running at all, we try to complete it
231 : // here.
// required_freed_bytes == 0 means "sweep everything remaining".
232 : ForAllSweepingSpaces(
233 166984 : [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
234 :
235 83492 : AbortAndWaitForTasks();
236 :
237 250476 : ForAllSweepingSpaces([this](AllocationSpace space) {
238 751428 : CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
239 333968 : });
240 83492 : sweeping_in_progress_ = false;
241 : }
242 :
243 108984 : bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
244 :
245 1577748 : int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
246 : FreeSpaceTreatmentMode free_space_mode) {
247 2949077 : Space* space = p->owner();
248 : DCHECK_NOT_NULL(space);
249 : DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
250 : space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
251 : DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
252 :
253 : // TODO(ulan): we don't have to clear type old-to-old slots in code space
254 : // because the concurrent marker doesn't mark code objects. This requires
255 : // the write barrier for code objects to check the color of the code object.
256 1050649 : bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
257 : p->typed_slot_set<OLD_TO_OLD>() != nullptr;
258 :
259 : // The free ranges map is used for filtering typed slots.
260 : std::map<uint32_t, uint32_t> free_ranges;
261 :
262 : // Before we sweep objects on the page, we free dead array buffers which
263 : // requires valid mark bits.
264 525986 : ArrayBufferTracker::FreeDead(p, marking_state_);
265 :
266 : Address free_start = p->area_start();
267 :
268 : // If we use the skip list for code space pages, we have to lock the skip
269 : // list because it could be accessed concurrently by the runtime or the
270 : // deoptimizer.
271 : const bool rebuild_skip_list =
272 638906 : space->identity() == CODE_SPACE && p->skip_list() != nullptr;
273 : SkipList* skip_list = p->skip_list();
274 525727 : if (rebuild_skip_list) {
275 : skip_list->Clear();
276 : }
277 :
278 : intptr_t live_bytes = 0;
279 : intptr_t freed_bytes = 0;
280 : intptr_t max_freed_bytes = 0;
281 : int curr_region = -1;
282 :
283 : // Set the allocated_bytes counter to area_size. The free operations below
284 : // will decrease the counter to actual live bytes.
285 525727 : p->ResetAllocatedBytes();
286 :
287 658781339 : for (auto object_and_size :
288 658781339 : LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
289 658255143 : HeapObject const object = object_and_size.first;
290 : DCHECK(marking_state_->IsBlack(object));
291 : Address free_end = object->address();
292 658255143 : if (free_end != free_start) {
293 21452093 : CHECK_GT(free_end, free_start);
294 21452093 : size_t size = static_cast<size_t>(free_end - free_start);
295 21452093 : if (free_space_mode == ZAP_FREE_SPACE) {
296 : ZapCode(free_start, size);
297 : }
298 21451402 : if (free_list_mode == REBUILD_FREE_LIST) {
299 : freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
300 21216998 : free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
301 : max_freed_bytes = Max(freed_bytes, max_freed_bytes);
302 : } else {
303 : p->heap()->CreateFillerObjectAt(
304 : free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
305 468808 : ClearFreedMemoryMode::kClearFreedMemory);
306 : }
307 21437433 : if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
308 : RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
309 21435591 : SlotSet::KEEP_EMPTY_BUCKETS);
310 : RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
311 21463115 : SlotSet::KEEP_EMPTY_BUCKETS);
312 21455817 : if (non_empty_typed_slots) {
313 : free_ranges.insert(std::pair<uint32_t, uint32_t>(
314 : static_cast<uint32_t>(free_start - p->address()),
315 7599 : static_cast<uint32_t>(free_end - p->address())));
316 : }
317 : }
318 658258867 : Map map = object->synchronized_map();
319 658258867 : int size = object->SizeFromMap(map);
320 657333897 : live_bytes += size;
321 657333897 : if (rebuild_skip_list) {
322 : int new_region_start = SkipList::RegionNumber(free_end);
323 : int new_region_end =
324 126608999 : SkipList::RegionNumber(free_end + size - kTaggedSize);
325 126608999 : if (new_region_start != curr_region || new_region_end != curr_region) {
326 : skip_list->AddObject(free_end, size);
327 : curr_region = new_region_end;
328 : }
329 : }
330 657333897 : free_start = free_end + size;
331 : }
332 :
333 526196 : if (free_start != p->area_end()) {
334 510682 : CHECK_GT(p->area_end(), free_start);
335 510682 : size_t size = static_cast<size_t>(p->area_end() - free_start);
336 510682 : if (free_space_mode == ZAP_FREE_SPACE) {
337 : ZapCode(free_start, size);
338 : }
339 510677 : if (free_list_mode == REBUILD_FREE_LIST) {
340 : freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
341 509344 : free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
342 : max_freed_bytes = Max(freed_bytes, max_freed_bytes);
343 : } else {
344 : p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
345 : ClearRecordedSlots::kNo,
346 2666 : ClearFreedMemoryMode::kClearFreedMemory);
347 : }
348 510641 : if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
349 : RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
350 510632 : SlotSet::KEEP_EMPTY_BUCKETS);
351 : RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
352 510629 : SlotSet::KEEP_EMPTY_BUCKETS);
353 510623 : if (non_empty_typed_slots) {
354 : free_ranges.insert(std::pair<uint32_t, uint32_t>(
355 : static_cast<uint32_t>(free_start - p->address()),
356 3750 : static_cast<uint32_t>(p->area_end() - p->address())));
357 : }
358 : }
359 :
360 : // Clear invalid typed slots after collection all free ranges.
361 526137 : if (!free_ranges.empty()) {
362 : TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
363 1251 : if (old_to_new != nullptr) {
364 1153 : old_to_new->ClearInvalidSlots(free_ranges);
365 : }
366 : TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
367 1251 : if (old_to_old != nullptr) {
368 104 : old_to_old->ClearInvalidSlots(free_ranges);
369 : }
370 : }
371 :
372 526137 : marking_state_->bitmap(p)->Clear();
373 526204 : if (free_list_mode == IGNORE_FREE_LIST) {
374 : marking_state_->SetLiveBytes(p, 0);
375 : // We did not free memory, so have to adjust allocated bytes here.
376 1650 : intptr_t freed_bytes = p->area_size() - live_bytes;
377 : p->DecreaseAllocatedBytes(freed_bytes);
378 : } else {
379 : // Keep the old live bytes counter of the page until RefillFreeList, where
380 : // the space size is refined.
381 : // The allocated_bytes() counter is precisely the total size of objects.
382 : DCHECK_EQ(live_bytes, p->allocated_bytes());
383 : }
384 : p->set_concurrent_sweeping_state(Page::kSweepingDone);
385 526204 : if (free_list_mode == IGNORE_FREE_LIST) return 0;
386 1049112 : return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
387 : }
388 :
389 472372 : void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
390 : Page* page = nullptr;
391 1275322 : while (!stop_sweeper_tasks_ &&
392 : ((page = GetSweepingPageSafe(identity)) != nullptr)) {
393 330540 : ParallelSweepPage(page, identity);
394 : }
395 472922 : }
396 :
397 24998 : bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
398 24998 : if (Page* page = GetSweepingPageSafe(identity)) {
399 24908 : ParallelSweepPage(page, identity);
400 : }
401 49996 : return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
402 : }
403 :
404 308422 : int Sweeper::ParallelSweepSpace(AllocationSpace identity,
405 : int required_freed_bytes, int max_pages) {
406 : int max_freed = 0;
407 : int pages_freed = 0;
408 : Page* page = nullptr;
409 732440 : while ((page = GetSweepingPageSafe(identity)) != nullptr) {
410 169241 : int freed = ParallelSweepPage(page, identity);
411 169243 : pages_freed += 1;
412 : DCHECK_GE(freed, 0);
413 : max_freed = Max(max_freed, freed);
414 169243 : if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
415 : return max_freed;
416 124078 : if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
417 : }
418 : return max_freed;
419 : }
420 :
421 528657 : int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
422 : // Early bailout for pages that are swept outside of the regular sweeping
423 : // path. This check here avoids taking the lock first, avoiding deadlocks.
424 1581833 : if (page->SweepingDone()) return 0;
425 :
426 : DCHECK(IsValidSweepingSpace(identity));
427 : int max_freed = 0;
428 : {
429 524519 : base::MutexGuard guard(page->mutex());
430 : // If this page was already swept in the meantime, we can return here.
431 1049044 : if (page->SweepingDone()) return 0;
432 :
433 : // If the page is a code page, the CodePageMemoryModificationScope changes
434 : // the page protection mode from rx -> rw while sweeping.
435 524522 : CodePageMemoryModificationScope code_page_scope(page);
436 :
437 : DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
438 524500 : page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
439 : const FreeSpaceTreatmentMode free_space_mode =
440 : Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
441 524500 : max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
442 : DCHECK(page->SweepingDone());
443 :
444 : // After finishing sweeping of a page we clean up its remembered set.
445 524536 : TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
446 524536 : if (typed_slot_set) {
447 1153 : typed_slot_set->FreeToBeFreedChunks();
448 : }
449 524520 : SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
450 524520 : if (slot_set) {
451 331058 : slot_set->FreeToBeFreedBuckets();
452 : }
453 : }
454 :
455 : {
456 524535 : base::MutexGuard guard(&mutex_);
457 524571 : swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
458 : }
459 524567 : return max_freed;
460 : }
461 :
462 89645 : void Sweeper::ScheduleIncrementalSweepingTask() {
463 89645 : if (!incremental_sweeper_pending_) {
464 31229 : incremental_sweeper_pending_ = true;
465 31229 : v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
466 : auto taskrunner =
467 31229 : V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
468 31229 : taskrunner->PostTask(
469 156145 : base::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
470 : }
471 89645 : }
472 :
473 524898 : void Sweeper::AddPage(AllocationSpace space, Page* page,
474 : Sweeper::AddPageMode mode) {
475 524898 : base::MutexGuard guard(&mutex_);
476 : DCHECK(IsValidSweepingSpace(space));
477 : DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
478 524898 : if (mode == Sweeper::REGULAR) {
479 524571 : PrepareToBeSweptPage(space, page);
480 : } else {
481 : // Page has been temporarily removed from the sweeper. Accounting already
482 : // happened when the page was initially added, so it is skipped here.
483 : DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
484 : }
485 : DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
486 524898 : sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
487 524898 : }
488 :
489 524571 : void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
490 : DCHECK_GE(page->area_size(),
491 : static_cast<size_t>(marking_state_->live_bytes(page)));
492 : DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
493 : page->ForAllFreeListCategories(
494 : [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
495 : page->set_concurrent_sweeping_state(Page::kSweepingPending);
496 : heap_->paged_space(space)->IncreaseAllocatedBytes(
497 524571 : marking_state_->live_bytes(page), page);
498 524571 : }
499 :
500 1250263 : Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
501 1250263 : base::MutexGuard guard(&mutex_);
502 : DCHECK(IsValidSweepingSpace(space));
503 : int space_index = GetSweepSpaceIndex(space);
504 : Page* page = nullptr;
505 2502198 : if (!sweeping_list_[space_index].empty()) {
506 524700 : page = sweeping_list_[space_index].front();
507 524700 : sweeping_list_[space_index].pop_front();
508 : }
509 1251075 : return page;
510 : }
511 :
512 574370 : void Sweeper::EnsurePageIsIterable(Page* page) {
513 574370 : AllocationSpace space = page->owner()->identity();
514 574370 : if (IsValidSweepingSpace(space)) {
515 566074 : SweepOrWaitUntilSweepingCompleted(page);
516 : } else {
517 : DCHECK(IsValidIterabilitySpace(space));
518 8296 : EnsureIterabilityCompleted();
519 : }
520 574370 : }
521 :
522 115382 : void Sweeper::EnsureIterabilityCompleted() {
523 230764 : if (!iterability_in_progress_) return;
524 :
525 83492 : if (FLAG_concurrent_sweeping && iterability_task_started_) {
526 425 : if (heap_->isolate()->cancelable_task_manager()->TryAbort(
527 850 : iterability_task_id_) != TryAbortResult::kTaskAborted) {
528 398 : iterability_task_semaphore_.Wait();
529 : }
530 425 : iterability_task_started_ = false;
531 : }
532 :
533 167260 : for (Page* page : iterability_list_) {
534 : MakeIterable(page);
535 : }
536 : iterability_list_.clear();
537 83492 : iterability_in_progress_ = false;
538 : }
539 :
540 : class Sweeper::IterabilityTask final : public CancelableTask {
541 : public:
542 : IterabilityTask(Isolate* isolate, Sweeper* sweeper,
543 : base::Semaphore* pending_iterability_task)
544 : : CancelableTask(isolate),
545 : sweeper_(sweeper),
546 : pending_iterability_task_(pending_iterability_task),
547 425 : tracer_(isolate->heap()->tracer()) {}
548 :
549 850 : ~IterabilityTask() override = default;
550 :
551 : private:
552 398 : void RunInternal() final {
553 1592 : TRACE_BACKGROUND_GC(tracer_,
554 : GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
555 2383 : for (Page* page : sweeper_->iterability_list_) {
556 1189 : sweeper_->MakeIterable(page);
557 : }
558 398 : sweeper_->iterability_list_.clear();
559 796 : pending_iterability_task_->Signal();
560 398 : }
561 :
562 : Sweeper* const sweeper_;
563 : base::Semaphore* const pending_iterability_task_;
564 : GCTracer* const tracer_;
565 :
566 : DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
567 : };
568 :
569 83492 : void Sweeper::StartIterabilityTasks() {
570 166984 : if (!iterability_in_progress_) return;
571 :
572 : DCHECK(!iterability_task_started_);
573 166180 : if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
574 : auto task = base::make_unique<IterabilityTask>(
575 850 : heap_->isolate(), this, &iterability_task_semaphore_);
576 425 : iterability_task_id_ = task->id();
577 425 : iterability_task_started_ = true;
578 1275 : V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
579 : }
580 : }
581 :
582 1465 : void Sweeper::AddPageForIterability(Page* page) {
583 : DCHECK(sweeping_in_progress_);
584 : DCHECK(iterability_in_progress_);
585 : DCHECK(!iterability_task_started_);
586 : DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
587 : DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
588 :
589 1465 : iterability_list_.push_back(page);
590 1465 : page->set_concurrent_sweeping_state(Page::kSweepingPending);
591 1465 : }
592 :
593 0 : void Sweeper::MakeIterable(Page* page) {
594 : DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
595 : const FreeSpaceTreatmentMode free_space_mode =
596 : Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
597 1465 : RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
598 0 : }
599 :
600 : } // namespace internal
601 183867 : } // namespace v8
|