Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_MARK_COMPACT_H_
6 : #define V8_HEAP_MARK_COMPACT_H_
7 :
8 : #include <deque>
9 : #include <vector>
10 :
11 : #include "src/heap/marking.h"
12 : #include "src/heap/objects-visiting.h"
13 : #include "src/heap/spaces.h"
14 : #include "src/heap/worklist.h"
15 :
16 : namespace v8 {
17 : namespace internal {
18 :
19 : // Forward declarations.
20 : class EvacuationJobTraits;
21 : class HeapObjectVisitor;
22 : class ItemParallelJob;
23 : class MigrationObserver;
24 : class RecordMigratedSlotVisitor;
25 : class UpdatingItem;
26 : class YoungGenerationMarkingVisitor;
27 :
28 : template <typename ConcreteState, AccessMode access_mode>
29 : class MarkingStateBase {
30 : public:
31 : V8_INLINE MarkBit MarkBitFrom(HeapObject* obj) {
32 : return MarkBitFrom(MemoryChunk::FromAddress(obj->address()),
33 5115119433 : obj->address());
34 : }
35 :
36 0 : V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
37 : return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
38 5761078955 : p->AddressToMarkbitIndex(addr));
39 : }
40 :
41 0 : Marking::ObjectColor Color(HeapObject* obj) {
42 0 : return Marking::Color(MarkBitFrom(obj));
43 : }
44 :
45 : V8_INLINE bool IsImpossible(HeapObject* obj) {
46 : return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
47 : }
48 :
49 : V8_INLINE bool IsBlack(HeapObject* obj) {
50 : return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
51 : }
52 :
53 : V8_INLINE bool IsWhite(HeapObject* obj) {
54 : return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
55 : }
56 :
57 : V8_INLINE bool IsGrey(HeapObject* obj) {
58 : return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
59 : }
60 :
61 : V8_INLINE bool IsBlackOrGrey(HeapObject* obj) {
62 : return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
63 : }
64 :
65 : V8_INLINE bool WhiteToGrey(HeapObject* obj) {
66 : return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
67 : }
68 :
69 : V8_INLINE bool WhiteToBlack(HeapObject* obj) {
70 33902293 : return WhiteToGrey(obj) && GreyToBlack(obj);
71 : }
72 :
73 : V8_INLINE bool GreyToBlack(HeapObject* obj) {
74 608003703 : MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
75 : MarkBit markbit = MarkBitFrom(p, obj->address());
76 625450175 : if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
77 623302283 : static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj->Size());
78 : return true;
79 : }
80 :
81 0 : void ClearLiveness(MemoryChunk* chunk) {
82 1199269 : static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
83 : static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
84 : }
85 : };
86 :
87 : class MarkBitCellIterator {
88 : public:
89 965788 : MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
90 : DCHECK(Bitmap::IsCellAligned(
91 : chunk_->AddressToMarkbitIndex(chunk_->area_start())));
92 : DCHECK(Bitmap::IsCellAligned(
93 : chunk_->AddressToMarkbitIndex(chunk_->area_end())));
94 : last_cell_index_ =
95 1931576 : Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
96 965788 : cell_base_ = chunk_->area_start();
97 : cell_index_ =
98 965788 : Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
99 965788 : cells_ = bitmap->cells();
100 : }
101 :
102 : inline bool Done() { return cell_index_ >= last_cell_index_; }
103 :
104 : inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
105 :
106 : inline MarkBit::CellType* CurrentCell() {
107 : DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
108 : chunk_->AddressToMarkbitIndex(cell_base_))));
109 640706767 : return &cells_[cell_index_];
110 : }
111 :
112 : inline Address CurrentCellBase() {
113 : DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
114 : chunk_->AddressToMarkbitIndex(cell_base_))));
115 : return cell_base_;
116 : }
117 :
118 : MUST_USE_RESULT inline bool Advance() {
119 513239377 : cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
120 513239377 : return ++cell_index_ != last_cell_index_;
121 : }
122 :
123 : inline bool Advance(unsigned int new_cell_index) {
124 610538161 : if (new_cell_index != cell_index_) {
125 : DCHECK_GT(new_cell_index, cell_index_);
126 : DCHECK_LE(new_cell_index, last_cell_index_);
127 127930392 : unsigned int diff = new_cell_index - cell_index_;
128 127930392 : cell_index_ = new_cell_index;
129 127930392 : cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
130 : return true;
131 : }
132 : return false;
133 : }
134 :
135 : // Return the next mark bit cell. If there is no next it returns 0;
136 : inline MarkBit::CellType PeekNext() {
137 : if (HasNext()) {
138 : return cells_[cell_index_ + 1];
139 : }
140 : return 0;
141 : }
142 :
143 : private:
144 : MemoryChunk* chunk_;
145 : MarkBit::CellType* cells_;
146 : unsigned int last_cell_index_;
147 : unsigned int cell_index_;
148 : Address cell_base_;
149 : };
150 :
// Selects which marking colors a LiveObjectRange iteration reports.
enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};
156 :
// Range over the live (marked) objects of a chunk, filtered by |mode|.
// Forward iteration over the chunk's marking bitmap yields
// (object, size) pairs.
template <LiveObjectIterationMode mode>
class LiveObjectRange {
 public:
  class iterator {
   public:
    using value_type = std::pair<HeapObject*, int /* size */>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator(MemoryChunk* chunk, Bitmap* bitmap, Address start);

    inline iterator& operator++();
    inline iterator operator++(int);

    // Iterators compare equal when positioned on the same object; the
    // end() iterator has a null current_object_.
    bool operator==(iterator other) const {
      return current_object_ == other.current_object_;
    }

    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() {
      return std::make_pair(current_object_, current_size_);
    }

   private:
    inline void AdvanceToNextValidObject();

    MemoryChunk* const chunk_;
    // Cached filler/free-space maps — presumably consulted by
    // AdvanceToNextValidObject to skip dead space; confirm in the .cc.
    Map* const one_word_filler_map_;
    Map* const two_word_filler_map_;
    Map* const free_space_map_;
    MarkBitCellIterator it_;
    Address cell_base_;
    MarkBit::CellType current_cell_;
    HeapObject* current_object_;
    int current_size_;
  };

  LiveObjectRange(MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk),
        bitmap_(bitmap),
        start_(chunk_->area_start()),
        end_(chunk->area_end()) {}

  inline iterator begin();
  inline iterator end();

 private:
  MemoryChunk* const chunk_;
  Bitmap* bitmap_;
  Address start_;
  Address end_;
};
211 :
// Static helpers for visiting the live objects of a page with a given
// marking state.
class LiveObjectVisitor : AllStatic {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  // Visits black objects on a MemoryChunk until the Visitor returns |false| for
  // an object. If IterationMode::kClearMarkbits is passed the markbits and
  // slots for visited objects are cleared for each successfully visited object.
  // On failure, |failed_object| is set to the object the visitor rejected.
  template <class Visitor, typename MarkingState>
  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
                                Visitor* visitor, IterationMode iteration_mode,
                                HeapObject** failed_object);

  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                      Visitor* visitor,
                                      IterationMode iteration_mode);

  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                     Visitor* visitor,
                                     IterationMode iteration_mode);

  // Recounts live bytes on |chunk| from its marking bitmap and stores the
  // result in |state|.
  template <typename MarkingState>
  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
244 :
// Target generation when evacuating a new-space page wholesale.
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
// Whether freed space is overwritten with a zap pattern during sweeping.
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
// Whether marking data is kept or cleared by an operation.
enum MarkingTreatmentMode { KEEP, CLEAR };
// Which remembered sets to update after evacuation.
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
249 :
// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
 public:
  virtual ~MarkCompactCollectorBase() {}

  virtual void SetUp() = 0;
  virtual void TearDown() = 0;
  virtual void CollectGarbage() = 0;

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() { return heap()->isolate(); }

 protected:
  explicit MarkCompactCollectorBase(Heap* heap)
      : heap_(heap), old_to_new_slots_(0) {}

  // Marking operations for objects reachable from roots.
  virtual void MarkLiveObjects() = 0;
  // Mark objects reachable (transitively) from objects in the marking
  // work list.
  virtual void ProcessMarkingWorklist() = 0;
  // Clear non-live references held in side data structures.
  virtual void ClearNonLiveReferences() = 0;
  virtual void EvacuatePrologue() = 0;
  virtual void EvacuateEpilogue() = 0;
  virtual void Evacuate() = 0;
  virtual void EvacuatePagesInParallel() = 0;
  virtual void UpdatePointersAfterEvacuation() = 0;
  // Factories for the work items used by the parallel pointer-update phase.
  virtual UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                  Address start,
                                                  Address end) = 0;
  virtual UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;

  // Spawns evacuation tasks on |job| and runs them to completion.
  template <class Evacuator, class Collector>
  void CreateAndExecuteEvacuationTasks(
      Collector* collector, ItemParallelJob* job,
      RecordMigratedSlotVisitor* record_visitor,
      MigrationObserver* migration_observer, const intptr_t live_bytes);

  // Returns whether this page should be moved according to heuristics.
  bool ShouldMovePage(Page* p, intptr_t live_bytes);

  // Adds updating work items to |job|; returns the number of items added.
  int CollectToSpaceUpdatingItems(ItemParallelJob* job);
  template <typename IterateableSpace>
  int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
                                        IterateableSpace* space,
                                        RememberedSetUpdatingMode mode);

  // Heuristics for sizing the parallel task pools.
  int NumberOfParallelCompactionTasks(int pages);
  int NumberOfParallelPointerUpdateTasks(int pages, int slots);
  int NumberOfParallelToSpacePointerUpdateTasks(int pages);

  Heap* heap_;
  // Number of old to new slots. Should be computed during MarkLiveObjects.
  // -1 indicates that the value couldn't be computed.
  int old_to_new_slots_;
};
308 :
309 : class MinorMarkingState final
310 : : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
311 : public:
312 0 : Bitmap* bitmap(const MemoryChunk* chunk) const {
313 0 : return chunk->young_generation_bitmap_;
314 : }
315 :
316 : void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
317 : reinterpret_cast<base::AtomicNumber<intptr_t>*>(
318 : &chunk->young_generation_live_byte_count_)
319 : ->Increment(by);
320 : }
321 :
322 : intptr_t live_bytes(MemoryChunk* chunk) const {
323 : return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
324 : &chunk->young_generation_live_byte_count_)
325 : ->Value();
326 : }
327 :
328 : void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
329 : reinterpret_cast<base::AtomicNumber<intptr_t>*>(
330 : &chunk->young_generation_live_byte_count_)
331 : ->SetValue(value);
332 : }
333 : };
334 :
// Non-atomic variant of MinorMarkingState; for use when no concurrent
// access to the young-generation marking data can occur.
class MinorNonAtomicMarkingState final
    : public MarkingStateBase<MinorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};
355 :
// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MinorMarkingState;
  using NonAtomicMarkingState = MinorNonAtomicMarkingState;

  explicit MinorMarkCompactCollector(Heap* heap);
  ~MinorMarkCompactCollector();

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  void CollectGarbage() override;

  // Makes |page| iterable, treating markbits and free space per the given
  // modes.
  void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
                    FreeSpaceTreatmentMode free_space_mode);
  void CleanupSweepToIteratePages();

 private:
  using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
  class RootMarkingVisitor;

  // Maximum number of marking tasks; slot 0 is reserved for the main thread.
  static const int kNumMarkers = 8;
  static const int kMainMarker = 0;

  inline MarkingWorklist* worklist() { return worklist_; }

  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
    return main_marking_visitor_;
  }

  void MarkLiveObjects() override;
  void MarkRootSetInParallel();
  void ProcessMarkingWorklist() override;
  void ClearNonLiveReferences() override;

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  int NumberOfParallelMarkingTasks(int pages);

  MarkingWorklist* worklist_;

  YoungGenerationMarkingVisitor* main_marking_visitor_;
  base::Semaphore page_parallel_job_semaphore_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<Page*> sweep_to_iterate_pages_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  friend class YoungGenerationMarkingTask;
  friend class YoungGenerationMarkingVisitor;
};
425 :
426 : class MajorAtomicMarkingState final
427 : : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
428 : public:
429 1852581188 : Bitmap* bitmap(const MemoryChunk* chunk) const {
430 1852581188 : return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
431 : }
432 :
433 216360442 : void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
434 : reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
435 : ->Increment(by);
436 216364804 : }
437 :
438 : intptr_t live_bytes(MemoryChunk* chunk) const {
439 : return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
440 : &chunk->live_byte_count_)
441 : ->Value();
442 : }
443 :
444 : void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
445 : reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
446 : ->SetValue(value);
447 : }
448 : };
449 :
// Non-atomic variant of MajorAtomicMarkingState; for use when no
// concurrent access to the full collector's marking data can occur.
class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};
470 :
// Weak objects encountered during marking. Gathered into worklists so
// they can be post-processed after marking completes.
struct WeakObjects {
  Worklist<WeakCell*, 64> weak_cells;
  Worklist<TransitionArray*, 64> transition_arrays;
};
476 :
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using AtomicMarkingState = MajorAtomicMarkingState;
  using NonAtomicMarkingState = MajorNonAtomicMarkingState;

  // Task id reserved for the main thread in the worklists below.
  static const int kMainThread = 0;
  // Wrapper for the shared and bailout worklists.
  class MarkingWorklist {
   public:
    using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;

    // The heap parameter is not used but needed to match the sequential case.
    explicit MarkingWorklist(Heap* heap) {}

    // Pushes onto the shared worklist; must succeed (DCHECKed).
    void Push(HeapObject* object) {
      bool success = shared_.Push(kMainThread, object);
      USE(success);
      DCHECK(success);
    }

    // Pushes onto the bailout worklist; must succeed (DCHECKed).
    void PushBailout(HeapObject* object) {
      bool success = bailout_.Push(kMainThread, object);
      USE(success);
      DCHECK(success);
    }

    // Pops in order bailout -> shared -> on_hold; returns nullptr when all
    // are empty. The bailout/on_hold lists only exist with concurrent
    // marking enabled.
    HeapObject* Pop() {
      HeapObject* result;
#ifdef V8_CONCURRENT_MARKING
      if (bailout_.Pop(kMainThread, &result)) return result;
#endif
      if (shared_.Pop(kMainThread, &result)) return result;
#ifdef V8_CONCURRENT_MARKING
      // The expectation is that this work list is empty almost all the time
      // and we can thus avoid the emptiness checks by putting it last.
      if (on_hold_.Pop(kMainThread, &result)) return result;
#endif
      return nullptr;
    }

    void Clear() {
      bailout_.Clear();
      shared_.Clear();
      on_hold_.Clear();
    }

    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }

    // True only when all three worklists are empty, both locally and in
    // their global pools.
    bool IsEmpty() {
      return bailout_.IsLocalEmpty(kMainThread) &&
             shared_.IsLocalEmpty(kMainThread) &&
             on_hold_.IsLocalEmpty(kMainThread) &&
             bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
             on_hold_.IsGlobalPoolEmpty();
    }

    // Total number of locally held objects across the three worklists.
    int Size() {
      return static_cast<int>(bailout_.LocalSize(kMainThread) +
                              shared_.LocalSize(kMainThread) +
                              on_hold_.LocalSize(kMainThread));
    }

    // Calls the specified callback on each element of the worklists and
    // replaces the element with the result of the callback. If the callback
    // returns nullptr then the element is removed from the worklist.
    // The callback must accept HeapObject* and return HeapObject*.
    template <typename Callback>
    void Update(Callback callback) {
      bailout_.Update(callback);
      shared_.Update(callback);
      on_hold_.Update(callback);
    }

    ConcurrentMarkingWorklist* shared() { return &shared_; }
    ConcurrentMarkingWorklist* bailout() { return &bailout_; }
    ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }

    void Print() {
      PrintWorklist("shared", &shared_);
      PrintWorklist("bailout", &bailout_);
      PrintWorklist("on_hold", &on_hold_);
    }

   private:
    // Prints the stats about the global pool of the worklist.
    void PrintWorklist(const char* worklist_name,
                       ConcurrentMarkingWorklist* worklist) {
      std::map<InstanceType, int> count;
      int total_count = 0;
      worklist->IterateGlobalPool([&count, &total_count](HeapObject* obj) {
        ++total_count;
        count[obj->map()->instance_type()]++;
      });
      // Rank instance types by number of occurrences, descending.
      std::vector<std::pair<int, InstanceType>> rank;
      for (auto i : count) {
        rank.push_back(std::make_pair(i.second, i.first));
      }
      std::map<InstanceType, std::string> instance_type_name;
#define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
      INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
#undef INSTANCE_TYPE_NAME
      std::sort(rank.begin(), rank.end(),
                std::greater<std::pair<int, InstanceType>>());
      PrintF("Worklist %s: %d\n", worklist_name, total_count);
      for (auto i : rank) {
        PrintF("  [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
      }
    }
    ConcurrentMarkingWorklist shared_;
    ConcurrentMarkingWorklist bailout_;
    ConcurrentMarkingWorklist on_hold_;
  };
590 :
591 : class RootMarkingVisitor;
592 : class CustomRootBodyMarkingVisitor;
593 :
  // Sweeps the paged spaces, either lazily on the main thread or via
  // background sweeper tasks (one per paged space at most).
  class Sweeper {
   public:
    // Pauses the sweeper tasks or completes sweeping.
    class PauseOrCompleteScope {
     public:
      explicit PauseOrCompleteScope(Sweeper* sweeper);
      ~PauseOrCompleteScope();

     private:
      Sweeper* const sweeper_;
    };

    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
    enum ClearOldToNewSlotsMode {
      DO_NOT_CLEAR,
      CLEAR_REGULAR_SLOTS,
      CLEAR_TYPED_SLOTS
    };

    typedef std::deque<Page*> SweepingList;
    typedef std::vector<Page*> SweptList;

    // Sweeps a single page; returns the number of bytes freed —
    // TODO(review): confirm the return semantics against the definition.
    int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
                 FreeSpaceTreatmentMode free_space_mode);

    explicit Sweeper(Heap* heap,
                     MarkCompactCollector::NonAtomicMarkingState* marking_state)
        : heap_(heap),
          marking_state_(marking_state),
          num_tasks_(0),
          pending_sweeper_tasks_semaphore_(0),
          sweeping_in_progress_(false),
          num_sweeping_tasks_(0),
          stop_sweeper_tasks_(false) {}

    bool sweeping_in_progress() const { return sweeping_in_progress_; }

    void AddPage(AllocationSpace space, Page* page);

    int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                           int max_pages = 0);
    int ParallelSweepPage(Page* page, AllocationSpace identity);

    // After calling this function sweeping is considered to be in progress
    // and the main thread can sweep lazily, but the background sweeper tasks
    // are not running yet.
    void StartSweeping();
    void StartSweeperTasks();
    void EnsureCompleted();
    void EnsureNewSpaceCompleted();
    bool AreSweeperTasksRunning();
    void SweepOrWaitUntilSweepingCompleted(Page* page);

    void AddSweptPageSafe(PagedSpace* space, Page* page);
    Page* GetSweptPageSafe(PagedSpace* space);

   private:
    class SweeperTask;

    static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
    static const int kMaxSweeperTasks = kAllocationSpaces;

    // Invokes |callback| once per sweepable allocation space.
    template <typename Callback>
    void ForAllSweepingSpaces(Callback callback) {
      for (int i = 0; i < kAllocationSpaces; i++) {
        callback(static_cast<AllocationSpace>(i));
      }
    }

    // Can only be called on the main thread when no tasks are running.
    bool IsDoneSweeping() const {
      for (int i = 0; i < kAllocationSpaces; i++) {
        if (!sweeping_list_[i].empty()) return false;
      }
      return true;
    }

    void SweepSpaceFromTask(AllocationSpace identity);

    void AbortAndWaitForTasks();

    Page* GetSweepingPageSafe(AllocationSpace space);

    void PrepareToBeSweptPage(AllocationSpace space, Page* page);

    Heap* const heap_;
    MarkCompactCollector::NonAtomicMarkingState* marking_state_;
    // Number of sweeper tasks started; ids recorded for cancellation.
    int num_tasks_;
    CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
    base::Semaphore pending_sweeper_tasks_semaphore_;
    // Guards the per-space sweeping/swept lists below.
    base::Mutex mutex_;
    SweptList swept_list_[kAllocationSpaces];
    SweepingList sweeping_list_[kAllocationSpaces];
    bool sweeping_in_progress_;
    // Counter is actively maintained by the concurrent tasks to avoid querying
    // the semaphore for maintaining a task counter on the main thread.
    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
    // Used by PauseOrCompleteScope to signal early bailout to tasks.
    base::AtomicValue<bool> stop_sweeper_tasks_;
  };
694 :
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  // Performs a global garbage collection.
  void CollectGarbage() override;

  // Selects pages of |space| as evacuation candidates for compaction.
  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  void FinishConcurrentMarking();

  bool StartCompaction();

  void AbortCompaction();

  static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  // Slot recording, used to later update pointers into evacuated objects.
  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  V8_INLINE static void RecordSlot(HeapObject* object, Object** slot,
                                   Object* target);
  void RecordLiveSlotsOnPage(Page* page);

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  MarkingWorklist* marking_worklist() { return &marking_worklist_; }

  WeakObjects* weak_objects() { return &weak_objects_; }

  // Records a weak cell / transition array for post-marking processing.
  void AddWeakCell(WeakCell* weak_cell) {
    weak_objects_.weak_cells.Push(kMainThread, weak_cell);
  }

  void AddTransitionArray(TransitionArray* array) {
    weak_objects_.transition_arrays.Push(kMainThread, array);
  }

  Sweeper& sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether performing mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreClean(PagedSpace* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
#endif
782 :
 private:
  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);

  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void VisitAllObjects(HeapObjectVisitor* visitor);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  void MarkLiveObjects() override;

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkRootObject(Root root, HeapObject* obj);

  // Used by wrapper tracing.
  V8_INLINE void MarkExternallyReferencedObject(HeapObject* obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor,
                 ObjectVisitor* custom_root_body_visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups,
  //      implicit references' groups, or embedder heap tracing.
  void ProcessEphemeralMarking(bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat embedded pointers in the code as strong as
  // otherwise they can die and try to deoptimize the underlying code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimize code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Drains the main thread marking work list. Will mark all pending objects
  // if no concurrent threads are running.
  void ProcessMarkingWorklist() override;

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences() override;
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearSimpleMapTransition(WeakCell* potential_transition,
                                Map* dead_target);
  void ClearSimpleMapTransition(Map* map, Map* dead_target);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new object or even new weak maps onto
  // the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  // Goes through the list of encountered weak cells and clears those with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearWeakCellsAndSimpleMapTransitions(
      DependentCode** dependent_code_list);
  void AbortWeakObjects();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
  int CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  void ReleaseEvacuationCandidates();
  void PostProcessEvacuationCandidates();
  void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);

  void ClearMarkbitsInPagedSpace(PagedSpace* space);
  void ClearMarkbitsInNewSpace(NewSpace* space);

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  // Whether the last marking cycle ran incrementally before this collection.
  bool was_marked_incrementally_;

  // True while the evacuation phase is running.
  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;
938 : bool have_code_to_deoptimize_;
939 :
940 : MarkingWorklist marking_worklist_;
941 : WeakObjects weak_objects_;
942 :
943 : // Candidates for pages that should be evacuated.
944 : std::vector<Page*> evacuation_candidates_;
945 : // Pages that are actually processed during evacuation.
946 : std::vector<Page*> old_space_evacuation_pages_;
947 : std::vector<Page*> new_space_evacuation_pages_;
948 : std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;
949 :
950 : Sweeper sweeper_;
951 :
952 : AtomicMarkingState atomic_marking_state_;
953 : NonAtomicMarkingState non_atomic_marking_state_;
954 :
955 : friend class FullEvacuator;
956 : friend class Heap;
957 : friend class RecordMigratedSlotVisitor;
958 : };
959 :
 : // Main marking visitor of the mark-compact collector. The template
 : // parameters select chunked FixedArray scanning (see
 : // kProgressBarScanningChunk below), retaining-path tracing, and the
 : // MarkingState flavor (atomic vs. non-atomic) used for mark bits.
960 : template <FixedArrayVisitationMode fixed_array_mode,
961 : TraceRetainingPathMode retaining_path_mode, typename MarkingState>
962 109227560 : class MarkingVisitor final
963 : : public HeapVisitor<
964 : int,
965 : MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>> {
966 : public:
967 : typedef HeapVisitor<
968 : int, MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>>
969 : Parent;
970 :
971 : V8_INLINE MarkingVisitor(MarkCompactCollector* collector,
972 : MarkingState* marking_state);
973 :
 : // Map pointers are handled specially (see MarkMapContents below), so the
 : // generic HeapVisitor map-pointer visit is disabled.
974 : V8_INLINE bool ShouldVisitMapPointer() { return false; }
975 :
 : // Type-specific visitors; each returns the visited object's size.
976 : V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
977 : V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
978 : V8_INLINE int VisitFixedArray(Map* map, FixedArray* object);
979 : V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
980 : V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
981 : V8_INLINE int VisitJSWeakCollection(Map* map, JSWeakCollection* object);
982 : V8_INLINE int VisitMap(Map* map, Map* object);
983 : V8_INLINE int VisitNativeContext(Map* map, Context* object);
984 : V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
985 : V8_INLINE int VisitWeakCell(Map* map, WeakCell* object);
986 :
987 : // ObjectVisitor implementation.
988 : V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
989 : V8_INLINE void VisitPointers(HeapObject* host, Object** start,
990 : Object** end) final;
991 : V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
992 : V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
993 : // Skip weak next code link.
994 0 : V8_INLINE void VisitNextCodeLink(Code* host, Object** p) final {}
995 :
996 : private:
997 : // Granularity in which FixedArrays are scanned if |fixed_array_mode|
998 : // is true.
999 : static const int kProgressBarScanningChunk = 32 * 1024;
1000 :
1001 : V8_INLINE int VisitFixedArrayIncremental(Map* map, FixedArray* object);
1002 :
1003 : V8_INLINE void MarkMapContents(Map* map);
1004 :
1005 : // Marks the object black without pushing it on the marking work list. Returns
1006 : // true if the object needed marking and false otherwise.
1007 : V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object);
1008 :
1009 : // Marks the object grey and pushes it on the marking work list.
1010 : V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
1011 :
1012 1811844037 : MarkingState* marking_state() { return marking_state_; }
1013 :
 : // Fetches the worklist via the heap's incremental marker rather than from
 : // |collector_| — NOTE(review): looks intentional (shared worklist between
 : // incremental and atomic marking); confirm.
1014 2933 : MarkCompactCollector::MarkingWorklist* marking_worklist() {
1015 2933 : return this->heap_->incremental_marking()->marking_worklist();
1016 : }
1017 :
 : // Raw back-pointers to collector infrastructure — presumably not owned.
1018 : Heap* const heap_;
1019 : MarkCompactCollector* const collector_;
1020 : MarkingState* const marking_state_;
1021 : };
1022 :
1023 : class EvacuationScope {
1024 : public:
1025 : explicit EvacuationScope(MarkCompactCollector* collector)
1026 : : collector_(collector) {
1027 : collector_->set_evacuation(true);
1028 : }
1029 :
1030 : ~EvacuationScope() { collector_->set_evacuation(false); }
1031 :
1032 : private:
1033 : MarkCompactCollector* collector_;
1034 : };
1035 :
1036 : } // namespace internal
1037 : } // namespace v8
1038 :
1039 : #endif // V8_HEAP_MARK_COMPACT_H_
|