Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/sweeper.h"
6 :
7 : #include "src/base/template-utils.h"
8 : #include "src/heap/array-buffer-tracker-inl.h"
9 : #include "src/heap/gc-tracer.h"
10 : #include "src/heap/mark-compact-inl.h"
11 : #include "src/heap/remembered-set.h"
12 : #include "src/objects-inl.h"
13 : #include "src/vm-state-inl.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 :
// Constructs the sweeper for |heap|. All flags start in the "idle" state;
// no sweeping work happens until StartSweeping()/StartSweeperTasks() are
// called. |marking_state| is consulted for live bytes and mark bits.
Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
    : heap_(heap),
      marking_state_(marking_state),
      num_tasks_(0),
      pending_sweeper_tasks_semaphore_(0),
      incremental_sweeper_pending_(false),
      sweeping_in_progress_(false),
      num_sweeping_tasks_(0),
      stop_sweeper_tasks_(false),
      iterability_task_semaphore_(0),
      iterability_in_progress_(false),
      iterability_task_started_(false),
      should_reduce_memory_(false) {}
31 :
// Stops concurrent sweeper tasks for the lifetime of the scope. If nothing
// remains to sweep once the tasks are aborted, sweeping is eagerly
// completed; otherwise sweeping stays "in progress" but may only proceed on
// the main thread until the scope is destroyed.
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
    : sweeper_(sweeper) {
  // Tell running SweeperTasks to bail out of their work loops.
  sweeper_->stop_sweeper_tasks_ = true;
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->AbortAndWaitForTasks();

  // Complete sweeping if there's nothing more to do.
  if (sweeper_->IsDoneSweeping()) {
    sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
    DCHECK(!sweeper_->sweeping_in_progress());
  } else {
    // Unless sweeping is complete the flag still indicates that the sweeper
    // is enabled. It just cannot use tasks anymore.
    DCHECK(sweeper_->sweeping_in_progress());
  }
}
49 :
50 22936 : Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
51 20973 : sweeper_->stop_sweeper_tasks_ = false;
52 20973 : if (!sweeper_->sweeping_in_progress()) return;
53 :
54 1963 : sweeper_->StartSweeperTasks();
55 20973 : }
56 :
// Temporarily takes the old-space sweeping work list out of the sweeper so
// the scope owner can filter which pages get put back. Requires an active
// PauseOrCompleteScope so no task is concurrently draining the list.
Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
    Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
    : sweeper_(sweeper),
      pause_or_complete_scope_(pause_or_complete_scope),
      sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
  USE(pause_or_complete_scope_);
  if (!sweeping_in_progress_) return;

  // Move the old-space list into this scope; the destructor returns the
  // (possibly filtered) list to the sweeper.
  int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
  old_space_sweeping_list_ =
      std::move(sweeper_->sweeping_list_[old_space_index]);
  sweeper_->sweeping_list_[old_space_index].clear();
}
70 :
// Hands the old-space sweeping list (minus any pages the owner filtered
// out) back to the sweeper.
Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
  DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
  if (!sweeping_in_progress_) return;

  sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
      std::move(old_space_sweeping_list_);
  // old_space_sweeping_list_ does not need to be cleared as we don't use it.
}
79 :
// Background task that drains the sweeping work lists of all sweepable
// spaces except CODE_SPACE. Each task begins at a different space
// (|space_to_start|) so that parallel tasks do not all contend on the same
// per-space list first.
class Sweeper::SweeperTask final : public CancelableTask {
 public:
  SweeperTask(Isolate* isolate, Sweeper* sweeper,
              base::Semaphore* pending_sweeper_tasks,
              std::atomic<intptr_t>* num_sweeping_tasks,
              AllocationSpace space_to_start)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        num_sweeping_tasks_(num_sweeping_tasks),
        space_to_start_(space_to_start),
        tracer_(isolate->heap()->tracer()) {}

  ~SweeperTask() override = default;

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    DCHECK(IsValidSweepingSpace(space_to_start_));
    // Visit all sweeping spaces round-robin, starting at space_to_start_.
    const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
    for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
      const AllocationSpace space_id = static_cast<AllocationSpace>(
          FIRST_GROWABLE_PAGED_SPACE +
          ((i + offset) % kNumberOfSweepingSpaces));
      // Do not sweep code space concurrently.
      if (space_id == CODE_SPACE) continue;
      DCHECK(IsValidSweepingSpace(space_id));
      sweeper_->SweepSpaceFromTask(space_id);
    }
    // Announce completion so AbortAndWaitForTasks() stops waiting on the
    // semaphore for this task.
    (*num_sweeping_tasks_)--;
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_sweeper_tasks_;
  std::atomic<intptr_t>* const num_sweeping_tasks_;
  AllocationSpace space_to_start_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
122 :
// Foreground task that sweeps CODE_SPACE one page at a time. Code pages are
// skipped by the concurrent SweeperTask, so this task re-posts itself until
// the code-space work list is empty.
class Sweeper::IncrementalSweeperTask final : public CancelableTask {
 public:
  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
      : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}

  ~IncrementalSweeperTask() override = default;

 private:
  void RunInternal() final {
    VMState<GC> state(isolate_);
    TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");

    // Clear the pending flag first so rescheduling below posts a new task.
    sweeper_->incremental_sweeper_pending_ = false;

    if (sweeper_->sweeping_in_progress()) {
      // SweepSpaceIncrementallyFromTask() returns true once the code-space
      // list is drained; otherwise schedule another increment.
      if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
        sweeper_->ScheduleIncrementalSweepingTask();
      }
    }
  }

  Isolate* const isolate_;
  Sweeper* const sweeper_;
  DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};
148 :
// Marks sweeping (and iterability processing) as started and sorts each
// space's page list ascending by live bytes, so pages yielding the most
// free memory are swept first.
void Sweeper::StartSweeping() {
  CHECK(!stop_sweeper_tasks_);
  sweeping_in_progress_ = true;
  iterability_in_progress_ = true;
  should_reduce_memory_ = heap_->ShouldReduceMemory();
  MajorNonAtomicMarkingState* marking_state =
      heap_->mark_compact_collector()->non_atomic_marking_state();
  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
    int space_index = GetSweepSpaceIndex(space);
    // Fewer live bytes == more reclaimable memory == swept earlier.
    std::sort(sweeping_list_[space_index].begin(),
              sweeping_list_[space_index].end(),
              [marking_state](Page* a, Page* b) {
                return marking_state->live_bytes(a) <
                       marking_state->live_bytes(b);
              });
  });
}
166 :
// Posts one concurrent SweeperTask per sweeping space and schedules the
// incremental (foreground) task for code space. No-op unless concurrent
// sweeping is enabled, sweeping has started, and tests have not asked for
// delayed tasks.
void Sweeper::StartSweeperTasks() {
  DCHECK_EQ(0, num_tasks_);
  DCHECK_EQ(0, num_sweeping_tasks_);
  if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
      !heap_->delay_sweeper_tasks_for_testing_) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      DCHECK(IsValidSweepingSpace(space));
      num_sweeping_tasks_++;
      // Each task starts at a different space to spread contention.
      auto task = base::make_unique<SweeperTask>(
          heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
          &num_sweeping_tasks_, space);
      DCHECK_LT(num_tasks_, kMaxSweeperTasks);
      // Record the id so AbortAndWaitForTasks() can cancel the task later.
      task_ids_[num_tasks_++] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    });
    ScheduleIncrementalSweepingTask();
  }
}
185 :
186 577733 : void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
187 577733 : if (!page->SweepingDone()) {
188 2819 : ParallelSweepPage(page, page->owner()->identity());
189 2819 : if (!page->SweepingDone()) {
190 : // We were not able to sweep that page, i.e., a concurrent
191 : // sweeper thread currently owns this page. Wait for the sweeper
192 : // thread to be done with this page.
193 : page->WaitUntilSweepingCompleted();
194 : }
195 : }
196 577733 : }
197 :
198 841875 : Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
199 841875 : base::MutexGuard guard(&mutex_);
200 841944 : SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
201 841944 : if (!list.empty()) {
202 469644 : auto last_page = list.back();
203 : list.pop_back();
204 469644 : return last_page;
205 : }
206 : return nullptr;
207 : }
208 :
// Cancels all posted sweeper tasks. A task that cannot be aborted is
// already running, so we wait on the semaphore it signals at the end of
// RunInternal(); aborted tasks never ran, so their counter contribution is
// undone here instead.
void Sweeper::AbortAndWaitForTasks() {
  if (!FLAG_concurrent_sweeping) return;

  for (int i = 0; i < num_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        TryAbortResult::kTaskAborted) {
      pending_sweeper_tasks_semaphore_.Wait();
    } else {
      // Aborted case.
      num_sweeping_tasks_--;
    }
  }
  num_tasks_ = 0;
  DCHECK_EQ(0, num_sweeping_tasks_);
}
224 :
// Finishes all outstanding sweeping work on the calling thread and stops
// the background tasks. Afterwards every sweeping list is empty and
// sweeping_in_progress() is false.
void Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  EnsureIterabilityCompleted();

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  ForAllSweepingSpaces(
      [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });

  AbortAndWaitForTasks();

  ForAllSweepingSpaces([this](AllocationSpace space) {
    CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
  });
  sweeping_in_progress_ = false;
}
242 :
// True while any concurrent SweeperTask has not yet decremented the counter
// at the end of its RunInternal().
bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
244 :
// Sweeps a single page: walks the black (live) objects, frees every gap
// between them (rebuilding the free list, or installing filler objects in
// IGNORE_FREE_LIST mode), removes remembered-set entries covering freed
// ranges, optionally rebuilds the code-space skip list, and finally clears
// the page's mark bits. Returns the guaranteed-allocatable size of the
// largest freed node, or 0 in IGNORE_FREE_LIST mode.
int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
                      FreeSpaceTreatmentMode free_space_mode) {
  Space* space = p->owner();
  DCHECK_NOT_NULL(space);
  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

  // TODO(ulan): we don't have to clear type old-to-old slots in code space
  // because the concurrent marker doesn't mark code objects. This requires
  // the write barrier for code objects to check the color of the code object.
  bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
                               p->typed_slot_set<OLD_TO_OLD>() != nullptr;

  // The free ranges map is used for filtering typed slots.
  std::map<uint32_t, uint32_t> free_ranges;

  // Before we sweep objects on the page, we free dead array buffers which
  // requires valid mark bits.
  ArrayBufferTracker::FreeDead(p, marking_state_);

  Address free_start = p->area_start();

  // If we use the skip list for code space pages, we have to lock the skip
  // list because it could be accessed concurrently by the runtime or the
  // deoptimizer.
  const bool rebuild_skip_list =
      space->identity() == CODE_SPACE && p->skip_list() != nullptr;
  SkipList* skip_list = p->skip_list();
  if (rebuild_skip_list) {
    skip_list->Clear();
  }

  intptr_t live_bytes = 0;
  intptr_t freed_bytes = 0;
  intptr_t max_freed_bytes = 0;
  int curr_region = -1;

  // Set the allocated_bytes counter to area_size. The free operations below
  // will decrease the counter to actual live bytes.
  p->ResetAllocatedBytes();

  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
    HeapObject const object = object_and_size.first;
    DCHECK(marking_state_->IsBlack(object));
    Address free_end = object->address();
    // Free the gap [free_start, free_end) between the previous live object
    // (or area start) and this object.
    if (free_end != free_start) {
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      if (free_space_mode == ZAP_FREE_SPACE) {
        ZapCode(free_start, size);
      }
      if (free_list_mode == REBUILD_FREE_LIST) {
        freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
            free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
      } else {
        p->heap()->CreateFillerObjectAt(
            free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
            ClearFreedMemoryMode::kClearFreedMemory);
      }
      if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
      // Slots in the freed range are dead: drop them from both remembered
      // sets, and remember the range so typed slots can be filtered below.
      RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      if (non_empty_typed_slots) {
        free_ranges.insert(std::pair<uint32_t, uint32_t>(
            static_cast<uint32_t>(free_start - p->address()),
            static_cast<uint32_t>(free_end - p->address())));
      }
    }
    Map map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    live_bytes += size;
    if (rebuild_skip_list) {
      int new_region_start = SkipList::RegionNumber(free_end);
      int new_region_end =
          SkipList::RegionNumber(free_end + size - kTaggedSize);
      if (new_region_start != curr_region || new_region_end != curr_region) {
        skip_list->AddObject(free_end, size);
        curr_region = new_region_end;
      }
    }
    free_start = free_end + size;
  }

  // Handle the tail gap between the last live object and the area end the
  // same way as the gaps above.
  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    if (free_space_mode == ZAP_FREE_SPACE) {
      ZapCode(free_start, size);
    }
    if (free_list_mode == REBUILD_FREE_LIST) {
      freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
          free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    } else {
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo,
                                      ClearFreedMemoryMode::kClearFreedMemory);
    }
    if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
    RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    if (non_empty_typed_slots) {
      free_ranges.insert(std::pair<uint32_t, uint32_t>(
          static_cast<uint32_t>(free_start - p->address()),
          static_cast<uint32_t>(p->area_end() - p->address())));
    }
  }

  // Clear invalid typed slots after collection all free ranges.
  if (!free_ranges.empty()) {
    TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
    if (old_to_new != nullptr) {
      old_to_new->ClearInvalidSlots(free_ranges);
    }
    TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
    if (old_to_old != nullptr) {
      old_to_old->ClearInvalidSlots(free_ranges);
    }
  }

  // The mark bits have served their purpose; clear them for the next cycle.
  marking_state_->bitmap(p)->Clear();
  if (free_list_mode == IGNORE_FREE_LIST) {
    marking_state_->SetLiveBytes(p, 0);
    // We did not free memory, so have to adjust allocated bytes here.
    intptr_t freed_bytes = p->area_size() - live_bytes;
    p->DecreaseAllocatedBytes(freed_bytes);
  } else {
    // Keep the old live bytes counter of the page until RefillFreeList, where
    // the space size is refined.
    // The allocated_bytes() counter is precisely the total size of objects.
    DCHECK_EQ(live_bytes, p->allocated_bytes());
  }
  p->set_concurrent_sweeping_state(Page::kSweepingDone);
  if (free_list_mode == IGNORE_FREE_LIST) return 0;
  return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
388 :
389 431156 : void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
390 : Page* page = nullptr;
391 1037344 : while (!stop_sweeper_tasks_ &&
392 : ((page = GetSweepingPageSafe(identity)) != nullptr)) {
393 303061 : ParallelSweepPage(page, identity);
394 : }
395 432076 : }
396 :
397 19926 : bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
398 19926 : if (Page* page = GetSweepingPageSafe(identity)) {
399 19886 : ParallelSweepPage(page, identity);
400 : }
401 39852 : return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
402 : }
403 :
404 281057 : int Sweeper::ParallelSweepSpace(AllocationSpace identity,
405 : int required_freed_bytes, int max_pages) {
406 : int max_freed = 0;
407 : int pages_freed = 0;
408 : Page* page = nullptr;
409 383098 : while ((page = GetSweepingPageSafe(identity)) != nullptr) {
410 146710 : int freed = ParallelSweepPage(page, identity);
411 146711 : pages_freed += 1;
412 : DCHECK_GE(freed, 0);
413 : max_freed = Max(max_freed, freed);
414 146711 : if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
415 : return max_freed;
416 107419 : if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
417 : }
418 : return max_freed;
419 : }
420 :
// Sweeps |page| unless another thread already did (or is doing) so; the
// page mutex arbitrates between the main thread and concurrent sweeper
// tasks. Returns the max freed node size from RawSweep, or 0 if the page
// needed no work here.
int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
  // Early bailout for pages that are swept outside of the regular sweeping
  // path. This check here avoids taking the lock first, avoiding deadlocks.
  if (page->SweepingDone()) return 0;

  DCHECK(IsValidSweepingSpace(identity));
  int max_freed = 0;
  {
    base::MutexGuard guard(page->mutex());
    // If this page was already swept in the meantime, we can return here.
    if (page->SweepingDone()) return 0;

    // If the page is a code page, the CodePageMemoryModificationScope changes
    // the page protection mode from rx -> rw while sweeping.
    CodePageMemoryModificationScope code_page_scope(page);

    DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
    page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
    const FreeSpaceTreatmentMode free_space_mode =
        Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
    DCHECK(page->SweepingDone());

    // After finishing sweeping of a page we clean up its remembered set.
    TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
    if (typed_slot_set) {
      typed_slot_set->FreeToBeFreedChunks();
    }
    SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
    if (slot_set) {
      slot_set->FreeToBeFreedBuckets();
    }
  }

  {
    // Publish the page so allocators can pick it up via GetSweptPageSafe().
    base::MutexGuard guard(&mutex_);
    swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
  }
  return max_freed;
}
461 :
462 79195 : void Sweeper::ScheduleIncrementalSweepingTask() {
463 79195 : if (!incremental_sweeper_pending_) {
464 25747 : incremental_sweeper_pending_ = true;
465 25747 : v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
466 : auto taskrunner =
467 25747 : V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
468 25747 : taskrunner->PostTask(
469 102988 : base::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
470 : }
471 79195 : }
472 :
// Adds |page| to the sweeping work list of |space|. Must not race with
// sweeper tasks (checked below). In REGULAR mode the page is also prepared
// and accounted for; READD_TEMPORARY_REMOVED_PAGE skips the accounting.
void Sweeper::AddPage(AllocationSpace space, Page* page,
                      Sweeper::AddPageMode mode) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
  if (mode == Sweeper::REGULAR) {
    PrepareToBeSweptPage(space, page);
  } else {
    // Page has been temporarily removed from the sweeper. Accounting already
    // happened when the page was initially added, so it is skipped here.
    DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
  }
  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
  sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}
488 :
// Marks |page| as pending sweeping and credits its live bytes to the owning
// space's allocated-bytes counter. The page must currently be "done" and
// have no free-list categories linked into the space's free list.
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
  DCHECK_GE(page->area_size(),
            static_cast<size_t>(marking_state_->live_bytes(page)));
  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
  page->ForAllFreeListCategories(
      [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
  page->set_concurrent_sweeping_state(Page::kSweepingPending);
  heap_->paged_space(space)->IncreaseAllocatedBytes(
      marking_state_->live_bytes(page), page);
}
499 :
500 1135288 : Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
501 1135288 : base::MutexGuard guard(&mutex_);
502 : DCHECK(IsValidSweepingSpace(space));
503 : int space_index = GetSweepSpaceIndex(space);
504 : Page* page = nullptr;
505 1136607 : if (!sweeping_list_[space_index].empty()) {
506 469693 : page = sweeping_list_[space_index].front();
507 469693 : sweeping_list_[space_index].pop_front();
508 : }
509 1136521 : return page;
510 : }
511 :
512 586003 : void Sweeper::EnsurePageIsIterable(Page* page) {
513 : AllocationSpace space = page->owner()->identity();
514 586003 : if (IsValidSweepingSpace(space)) {
515 577733 : SweepOrWaitUntilSweepingCompleted(page);
516 : } else {
517 : DCHECK(IsValidIterabilitySpace(space));
518 8270 : EnsureIterabilityCompleted();
519 : }
520 586003 : }
521 :
// Makes every page in the iterability work list iterable on this thread.
// A concurrently running IterabilityTask is first aborted or waited for so
// the list is not mutated underneath us.
void Sweeper::EnsureIterabilityCompleted() {
  if (!iterability_in_progress_) return;

  if (FLAG_concurrent_sweeping && iterability_task_started_) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(
            iterability_task_id_) != TryAbortResult::kTaskAborted) {
      // The task is already running; wait for its completion signal.
      iterability_task_semaphore_.Wait();
    }
    iterability_task_started_ = false;
  }

  // Process whatever remains in the list (everything, if the task was
  // aborted before it ran; nothing, if it completed).
  for (Page* page : iterability_list_) {
    MakeIterable(page);
  }
  iterability_list_.clear();
  iterability_in_progress_ = false;
}
539 :
// Background task that makes every page in the sweeper's iterability list
// iterable and signals the semaphore on completion so that
// EnsureIterabilityCompleted() can wait for it.
class Sweeper::IterabilityTask final : public CancelableTask {
 public:
  IterabilityTask(Isolate* isolate, Sweeper* sweeper,
                  base::Semaphore* pending_iterability_task)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_iterability_task_(pending_iterability_task),
        tracer_(isolate->heap()->tracer()) {}

  ~IterabilityTask() override = default;

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    for (Page* page : sweeper_->iterability_list_) {
      sweeper_->MakeIterable(page);
    }
    sweeper_->iterability_list_.clear();
    // Unblock EnsureIterabilityCompleted() if it is waiting for us.
    pending_iterability_task_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_iterability_task_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
};
568 :
// Posts a background IterabilityTask if concurrent sweeping is enabled and
// there are pages queued to be made iterable; otherwise a no-op.
void Sweeper::StartIterabilityTasks() {
  if (!iterability_in_progress_) return;

  DCHECK(!iterability_task_started_);
  if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
    auto task = base::make_unique<IterabilityTask>(
        heap_->isolate(), this, &iterability_task_semaphore_);
    // Remember the id so EnsureIterabilityCompleted() can abort the task.
    iterability_task_id_ = task->id();
    iterability_task_started_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  }
}
581 :
// Queues |page| to be made iterable later, either by the IterabilityTask or
// by EnsureIterabilityCompleted(). Only valid while sweeping is in progress
// and before the iterability task has been started.
void Sweeper::AddPageForIterability(Page* page) {
  DCHECK(sweeping_in_progress_);
  DCHECK(iterability_in_progress_);
  DCHECK(!iterability_task_started_);
  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());

  iterability_list_.push_back(page);
  page->set_concurrent_sweeping_state(Page::kSweepingPending);
}
592 :
593 0 : void Sweeper::MakeIterable(Page* page) {
594 : DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
595 : const FreeSpaceTreatmentMode free_space_mode =
596 : Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
597 1544 : RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
598 0 : }
599 :
600 : } // namespace internal
601 120216 : } // namespace v8
|