Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_MARK_COMPACT_H_
6 : #define V8_HEAP_MARK_COMPACT_H_
7 :
8 : #include <deque>
9 :
10 : #include "src/base/bits.h"
11 : #include "src/base/platform/condition-variable.h"
12 : #include "src/cancelable-task.h"
13 : #include "src/heap/marking.h"
14 : #include "src/heap/spaces.h"
15 : #include "src/heap/store-buffer.h"
16 :
17 : namespace v8 {
18 : namespace internal {
19 :
20 : // Forward declarations.
21 : class CodeFlusher;
22 : class HeapObjectVisitor;
23 : class MarkCompactCollector;
24 : class MinorMarkCompactCollector;
25 : class MarkingVisitor;
26 : class ThreadLocalTop;
27 :
28 : class ObjectMarking : public AllStatic {
29 : public:
30 : V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj,
31 5299637437 : const MarkingState& state) {
32 6548393588 : const Address address = obj->address();
33 7961811249 : const MemoryChunk* p = MemoryChunk::FromAddress(address);
34 13261447389 : return state.bitmap()->MarkBitFromIndex(p->AddressToMarkbitIndex(address));
35 : }
36 :
37 0 : static Marking::ObjectColor Color(HeapObject* obj,
38 : const MarkingState& state) {
39 0 : return Marking::Color(ObjectMarking::MarkBitFrom(obj, state));
40 : }
41 :
42 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
43 : V8_INLINE static bool IsImpossible(HeapObject* obj,
44 : const MarkingState& state) {
45 : return Marking::IsImpossible<access_mode>(MarkBitFrom(obj, state));
46 : }
47 :
48 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
49 : V8_INLINE static bool IsBlack(HeapObject* obj, const MarkingState& state) {
50 : return Marking::IsBlack<access_mode>(MarkBitFrom(obj, state));
51 : }
52 :
53 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
54 : V8_INLINE static bool IsWhite(HeapObject* obj, const MarkingState& state) {
55 : return Marking::IsWhite<access_mode>(MarkBitFrom(obj, state));
56 : }
57 :
58 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
59 : V8_INLINE static bool IsGrey(HeapObject* obj, const MarkingState& state) {
60 : return Marking::IsGrey<access_mode>(MarkBitFrom(obj, state));
61 : }
62 :
63 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
64 : V8_INLINE static bool IsBlackOrGrey(HeapObject* obj,
65 : const MarkingState& state) {
66 : return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj, state));
67 : }
68 :
69 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
70 : V8_INLINE static bool BlackToGrey(HeapObject* obj,
71 835058 : const MarkingState& state) {
72 : DCHECK(
73 : (access_mode == MarkBit::ATOMIC || IsBlack<access_mode>(obj, state)));
74 : MarkBit markbit = MarkBitFrom(obj, state);
75 835058 : if (!Marking::BlackToGrey<access_mode>(markbit)) return false;
76 1670116 : state.IncrementLiveBytes<access_mode>(-obj->Size());
77 : return true;
78 : }
79 :
80 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
81 : V8_INLINE static bool WhiteToGrey(HeapObject* obj,
82 : const MarkingState& state) {
83 : DCHECK(
84 : (access_mode == MarkBit::ATOMIC || IsWhite<access_mode>(obj, state)));
85 : return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj, state));
86 : }
87 :
88 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
89 : V8_INLINE static bool WhiteToBlack(HeapObject* obj,
90 : const MarkingState& state) {
91 : DCHECK(
92 : (access_mode == MarkBit::ATOMIC || IsWhite<access_mode>(obj, state)));
93 661098976 : if (!ObjectMarking::WhiteToGrey<access_mode>(obj, state)) return false;
94 : return ObjectMarking::GreyToBlack<access_mode>(obj, state);
95 : }
96 :
97 : template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
98 : V8_INLINE static bool GreyToBlack(HeapObject* obj,
99 583599638 : const MarkingState& state) {
100 : DCHECK((access_mode == MarkBit::ATOMIC || IsGrey<access_mode>(obj, state)));
101 : MarkBit markbit = MarkBitFrom(obj, state);
102 708416837 : if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
103 1292016508 : state.IncrementLiveBytes<access_mode>(obj->Size());
104 : return true;
105 : }
106 :
107 : private:
108 : DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
109 : };
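
// A minimal, self-contained sketch of the marking scheme that ObjectMarking
// wraps above: every object gets a mark color, and the only legal transitions
// are white -> grey (discovered), grey -> black (fully scanned), plus the
// black -> grey downgrade used when a black object must be revisited. Live
// bytes are accounted on the grey -> black edge, which is why BlackToGrey
// subtracts the object size again. The class, names, and per-object byte
// encoding below are illustrative, not V8's packed two-bit bitmap layout.
#include <cstdint>
#include <vector>

enum class Color : uint8_t { kWhite = 0, kGrey = 1, kBlack = 2 };

class ToyMarkingBitmap {
 public:
  explicit ToyMarkingBitmap(size_t num_objects) : colors_(num_objects, 0) {}

  Color ColorOf(size_t index) const {
    return static_cast<Color>(colors_[index]);
  }

  // Mirrors ObjectMarking::WhiteToGrey: fails if already grey or black.
  bool WhiteToGrey(size_t index) {
    if (ColorOf(index) != Color::kWhite) return false;
    colors_[index] = static_cast<uint8_t>(Color::kGrey);
    return true;
  }

  // Mirrors ObjectMarking::GreyToBlack; the real collector also adds the
  // object's size to the page's live bytes at this point.
  bool GreyToBlack(size_t index) {
    if (ColorOf(index) != Color::kGrey) return false;
    colors_[index] = static_cast<uint8_t>(Color::kBlack);
    return true;
  }

  // WhiteToBlack is just the composition of the two steps, exactly as in
  // ObjectMarking::WhiteToBlack above.
  bool WhiteToBlack(size_t index) {
    return WhiteToGrey(index) && GreyToBlack(index);
  }

 private:
  std::vector<uint8_t> colors_;  // one byte per object here; V8 packs 2 bits
};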
110 :
111 : // ----------------------------------------------------------------------------
112 : // Marking deque for tracing live objects.
113 59285 : class MarkingDeque {
114 : public:
115 : explicit MarkingDeque(Heap* heap)
116 : : backing_store_(nullptr),
117 : backing_store_committed_size_(0),
118 : array_(nullptr),
119 : top_(0),
120 : bottom_(0),
121 : mask_(0),
122 : overflowed_(false),
123 : in_use_(false),
124 : uncommit_task_pending_(false),
125 60782 : heap_(heap) {}
126 :
127 : void SetUp();
128 : void TearDown();
129 :
130 : // Ensures that the marking deque is committed and will stay committed until
131 : // StopUsing() is called.
132 : void StartUsing();
133 : void StopUsing();
134 : void Clear();
135 :
136 470066229 : inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
137 :
138 46552006 : inline bool IsEmpty() { return top_ == bottom_; }
139 :
140 : bool overflowed() const { return overflowed_; }
141 :
142 940 : void ClearOverflowed() { overflowed_ = false; }
143 :
144 835058 : void SetOverflowed() { overflowed_ = true; }
145 :
146 : // Push the object on the marking stack if there is room, otherwise mark the
147 : // deque as overflowed and wait for a rescan of the heap.
148 469050142 : INLINE(bool Push(HeapObject* object)) {
149 : DCHECK(object->IsHeapObject());
150 469050142 : if (IsFull()) {
151 835058 : SetOverflowed();
152 : return false;
153 : } else {
154 468215086 : array_[top_] = object;
155 468215086 : top_ = ((top_ + 1) & mask_);
156 : return true;
157 : }
158 : }
159 :
160 : INLINE(HeapObject* Pop()) {
161 : DCHECK(!IsEmpty());
162 468131079 : top_ = ((top_ - 1) & mask_);
163 468131079 : HeapObject* object = array_[top_];
164 : DCHECK(object->IsHeapObject());
165 : return object;
166 : }
167 :
168 : // Unshift the object into the marking stack if there is room, otherwise mark
169 : // the deque as overflowed and wait for a rescan of the heap.
170 43012 : INLINE(bool Unshift(HeapObject* object)) {
171 : DCHECK(object->IsHeapObject());
172 43012 : if (IsFull()) {
173 0 : SetOverflowed();
174 : return false;
175 : } else {
176 43012 : bottom_ = ((bottom_ - 1) & mask_);
177 43012 : array_[bottom_] = object;
178 : return true;
179 : }
180 : }
181 :
182 : template <typename Callback>
183 0 : void Iterate(Callback callback) {
184 0 : int i = bottom_;
185 0 : while (i != top_) {
186 0 : callback(array_[i]);
187 0 : i = (i + 1) & mask_;
188 : }
189 0 : }
190 :
191 : HeapObject** array() { return array_; }
192 : int bottom() { return bottom_; }
193 : int top() { return top_; }
194 : int mask() { return mask_; }
195 830 : void set_top(int top) { top_ = top; }
196 :
197 : private:
198 : // This task uncommits the marking deque backing store if
199 : // marking_deque_->in_use_ is false.
200 106144 : class UncommitTask : public CancelableTask {
201 : public:
202 : explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
203 53072 : : CancelableTask(isolate), marking_deque_(marking_deque) {}
204 :
205 : private:
206 : // CancelableTask override.
207 53006 : void RunInternal() override {
208 53006 : base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
209 53006 : if (!marking_deque_->in_use_) {
210 52736 : marking_deque_->Uncommit();
211 : }
212 53006 : marking_deque_->uncommit_task_pending_ = false;
213 53006 : }
214 :
215 : MarkingDeque* marking_deque_;
216 : DISALLOW_COPY_AND_ASSIGN(UncommitTask);
217 : };
218 :
219 : static const size_t kMaxSize = 4 * MB;
220 : static const size_t kMinSize = 256 * KB;
221 :
222 : // Must be called with mutex lock.
223 : void EnsureCommitted();
224 :
225 : // Must be called with mutex lock.
226 : void Uncommit();
227 :
228 : // Must be called with mutex lock.
229 : void StartUncommitTask();
230 :
231 : base::Mutex mutex_;
232 :
233 : base::VirtualMemory* backing_store_;
234 : size_t backing_store_committed_size_;
235 : HeapObject** array_;
236 : // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
237 : // empty when top_ == bottom_ and full when ((top_ + 1) & mask_) == bottom_,
238 : // i.e. one slot always stays unused.
239 : int top_;
240 : int bottom_;
241 : int mask_;
242 : bool overflowed_;
243 : // in_use_ == true after taking mutex lock implies that the marking deque is
244 : // committed and will stay committed at least until in_use_ == false.
245 : bool in_use_;
246 : bool uncommit_task_pending_;
247 : Heap* heap_;
248 :
249 : DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
250 : };
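
// A minimal sketch of the power-of-two ring buffer that MarkingDeque
// implements above. The capacity is a power of two so indices wrap with a
// bit mask; the buffer is empty when top_ == bottom_ and full when
// ((top_ + 1) & mask) == bottom_, leaving one slot unused. On overflow the
// collector sets a flag and later rescans the heap for grey objects instead
// of growing the buffer. Names are illustrative, not V8 API.
#include <cstddef>

template <typename T, size_t kCapacity>  // kCapacity must be a power of two
class ToyMarkingDeque {
  static_assert((kCapacity & (kCapacity - 1)) == 0, "power of two capacity");

 public:
  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & kMask) == bottom_; }
  bool overflowed() const { return overflowed_; }

  // Push at the top; on overflow, remember that a heap rescan is needed.
  bool Push(T value) {
    if (IsFull()) {
      overflowed_ = true;
      return false;
    }
    array_[top_] = value;
    top_ = (top_ + 1) & kMask;
    return true;
  }

  // Pop from the top; the caller must check IsEmpty() first, as the DCHECK
  // in MarkingDeque::Pop does. LIFO order keeps marking roughly depth-first.
  T Pop() {
    top_ = (top_ - 1) & kMask;
    return array_[top_];
  }

  // Unshift at the bottom: the entry is popped only after everything that is
  // currently on the stack.
  bool Unshift(T value) {
    if (IsFull()) {
      overflowed_ = true;
      return false;
    }
    bottom_ = (bottom_ - 1) & kMask;
    array_[bottom_] = value;
    return true;
  }

 private:
  static const size_t kMask = kCapacity - 1;
  T array_[kCapacity];
  size_t top_ = 0;
  size_t bottom_ = 0;
  bool overflowed_ = false;
};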
251 :
252 :
253 : // CodeFlusher collects candidates for code flushing during marking and
254 : // processes those candidates after marking has completed in order to
255 : // reset those functions referencing code objects that would otherwise
256 : // be unreachable. Code objects can be referenced in two ways:
257 : // - SharedFunctionInfo references unoptimized code.
258 : // - JSFunction references either unoptimized or optimized code.
259 : // We are not allowed to flush unoptimized code for functions that got
260 : // optimized or inlined into optimized code, because we might bailout
261 : // into the unoptimized code again during deoptimization.
262 : class CodeFlusher {
263 : public:
264 : explicit CodeFlusher(Isolate* isolate)
265 : : isolate_(isolate),
266 : jsfunction_candidates_head_(nullptr),
267 60782 : shared_function_info_candidates_head_(nullptr) {}
268 :
269 : inline void AddCandidate(SharedFunctionInfo* shared_info);
270 : inline void AddCandidate(JSFunction* function);
271 :
272 : void EvictCandidate(SharedFunctionInfo* shared_info);
273 : void EvictCandidate(JSFunction* function);
274 :
275 : void ProcessCandidates() {
276 53346 : ProcessSharedFunctionInfoCandidates();
277 53346 : ProcessJSFunctionCandidates();
278 : }
279 :
280 : inline void VisitListHeads(RootVisitor* v);
281 :
282 : template <typename StaticVisitor>
283 : inline void IteratePointersToFromSpace();
284 :
285 : private:
286 : void ProcessJSFunctionCandidates();
287 : void ProcessSharedFunctionInfoCandidates();
288 :
289 : static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
290 : static inline JSFunction* GetNextCandidate(JSFunction* candidate);
291 : static inline void SetNextCandidate(JSFunction* candidate,
292 : JSFunction* next_candidate);
293 : static inline void ClearNextCandidate(JSFunction* candidate,
294 : Object* undefined);
295 :
296 : static inline SharedFunctionInfo* GetNextCandidate(
297 : SharedFunctionInfo* candidate);
298 : static inline void SetNextCandidate(SharedFunctionInfo* candidate,
299 : SharedFunctionInfo* next_candidate);
300 : static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
301 :
302 : Isolate* isolate_;
303 : JSFunction* jsfunction_candidates_head_;
304 : SharedFunctionInfo* shared_function_info_candidates_head_;
305 :
306 : DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
307 : };
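
// A minimal sketch of the candidate-list technique CodeFlusher uses above:
// the lists are intrusive, i.e. the "next" link is stored in a field of the
// candidate itself (via the Get/Set/ClearNextCandidate helpers), so collecting
// candidates during marking allocates no extra memory. The ToyCandidate type
// and its next_candidate field are illustrative; V8 reuses existing fields of
// SharedFunctionInfo and JSFunction for the link.
struct ToyCandidate {
  int id = 0;
  ToyCandidate* next_candidate = nullptr;  // the intrusive link
};

class ToyCandidateList {
 public:
  // Mirrors AddCandidate: push onto the head of the singly linked list.
  void Add(ToyCandidate* candidate) {
    candidate->next_candidate = head_;
    head_ = candidate;
  }

  // Mirrors EvictCandidate: unlink a candidate in place.
  void Evict(ToyCandidate* candidate) {
    for (ToyCandidate** link = &head_; *link != nullptr;
         link = &(*link)->next_candidate) {
      if (*link == candidate) {
        *link = candidate->next_candidate;
        candidate->next_candidate = nullptr;
        return;
      }
    }
  }

  // Mirrors ProcessCandidates: walk the list once and then drop it.
  template <typename Callback>
  void Process(Callback callback) {
    for (ToyCandidate* c = head_; c != nullptr;) {
      ToyCandidate* next = c->next_candidate;
      callback(c);
      c = next;
    }
    head_ = nullptr;
  }

 private:
  ToyCandidate* head_ = nullptr;
};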
308 :
309 : class MarkBitCellIterator BASE_EMBEDDED {
310 : public:
311 621211 : MarkBitCellIterator(MemoryChunk* chunk, MarkingState state) : chunk_(chunk) {
312 : last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
313 1242422 : chunk_->AddressToMarkbitIndex(chunk_->area_end())));
314 621211 : cell_base_ = chunk_->area_start();
315 : cell_index_ = Bitmap::IndexToCell(
316 621211 : Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
317 621211 : cells_ = state.bitmap()->cells();
318 : }
319 :
320 : inline bool Done() { return cell_index_ == last_cell_index_; }
321 :
322 : inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
323 :
324 : inline MarkBit::CellType* CurrentCell() {
325 : DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
326 : chunk_->AddressToMarkbitIndex(cell_base_))));
327 739694330 : return &cells_[cell_index_];
328 : }
329 :
330 : inline Address CurrentCellBase() {
331 : DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
332 : chunk_->AddressToMarkbitIndex(cell_base_))));
333 : return cell_base_;
334 : }
335 :
336 : MUST_USE_RESULT inline bool Advance() {
337 551242659 : cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
338 551242659 : return ++cell_index_ != last_cell_index_;
339 : }
340 :
341 : inline bool Advance(unsigned int new_cell_index) {
342 742247079 : if (new_cell_index != cell_index_) {
343 : DCHECK_GT(new_cell_index, cell_index_);
344 : DCHECK_LE(new_cell_index, last_cell_index_);
345 188347666 : unsigned int diff = new_cell_index - cell_index_;
346 188347666 : cell_index_ = new_cell_index;
347 188347666 : cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
348 : return true;
349 : }
350 : return false;
351 : }
352 :
353 : // Returns the next mark bit cell, or 0 if there is no next cell.
354 : inline MarkBit::CellType PeekNext() {
355 : if (HasNext()) {
356 : return cells_[cell_index_ + 1];
357 : }
358 : return 0;
359 : }
360 :
361 : private:
362 : MemoryChunk* chunk_;
363 : MarkBit::CellType* cells_;
364 : unsigned int last_cell_index_;
365 : unsigned int cell_index_;
366 : Address cell_base_;
367 : };
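
// A minimal sketch of the bitmap address arithmetic behind the iterator
// above: one mark bit per pointer-sized word, 32 bits per bitmap cell, so a
// cell covers 32 * kWordSize bytes of the page and the word an individual bit
// describes can be recomputed from the cell base address plus the bit's
// position. The constants and helper names are illustrative; V8's actual
// layout lives in the Bitmap class used by MarkBitCellIterator.
#include <cstddef>
#include <cstdint>

constexpr size_t kWordSize = 8;      // bytes per pointer-sized word (64-bit)
constexpr size_t kBitsPerCell = 32;  // mark bits per bitmap cell

// Index of the mark bit describing the word at `offset` bytes into the page.
constexpr size_t MarkbitIndex(size_t offset) { return offset / kWordSize; }

// Which bitmap cell that bit lives in, and its position inside the cell.
constexpr size_t CellIndex(size_t markbit_index) {
  return markbit_index / kBitsPerCell;
}
constexpr size_t BitInCell(size_t markbit_index) {
  return markbit_index % kBitsPerCell;
}

// Recovers the word address from a cell base and a bit position, which is
// what a live-object walk does for each set bit it finds in CurrentCell().
constexpr uintptr_t WordAddress(uintptr_t cell_base, size_t bit_in_cell) {
  return cell_base + bit_in_cell * kWordSize;
}

static_assert(CellIndex(MarkbitIndex(256 * kWordSize)) == 8, "word 256 -> cell 8");
static_assert(BitInCell(MarkbitIndex(33 * kWordSize)) == 1, "word 33 -> bit 1");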
368 :
369 : // Grey objects can occur on black pages when black objects transition to
370 : // grey, e.g. when calling RecordWrites on them.
371 : enum LiveObjectIterationMode {
372 : kBlackObjects,
373 : kGreyObjects,
374 : kAllLiveObjects
375 : };
376 :
377 : template <LiveObjectIterationMode T>
378 : class LiveObjectIterator BASE_EMBEDDED {
379 : public:
380 621211 : LiveObjectIterator(MemoryChunk* chunk, MarkingState state)
381 : : chunk_(chunk),
382 : it_(chunk_, state),
383 : cell_base_(it_.CurrentCellBase()),
384 1863633 : current_cell_(*it_.CurrentCell()) {}
385 :
386 : HeapObject* Next();
387 :
388 : private:
389 671939418 : inline Heap* heap() { return chunk_->heap(); }
390 :
391 : MemoryChunk* chunk_;
392 : MarkBitCellIterator it_;
393 : Address cell_base_;
394 : MarkBit::CellType current_cell_;
395 : };
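
// A minimal sketch of the inner step of LiveObjectIterator::Next(): scan one
// bitmap cell for set bits using count-trailing-zeros, clearing each bit as
// it is consumed, and turn the bit position back into a word address. The
// callback, the hard-coded 8-byte word size, and the GCC/Clang builtin are
// illustrative choices, not V8's helpers.
#include <cstdint>

template <typename Callback>
void ForEachMarkedWord(uint32_t cell, uintptr_t cell_base, Callback callback) {
  while (cell != 0) {
    int bit = __builtin_ctz(cell);  // position of the lowest set bit
    callback(cell_base + bit * 8);  // address of the marked word
    cell &= cell - 1;               // clear the bit just handled
  }
}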
396 :
397 : class LiveObjectVisitor BASE_EMBEDDED {
398 : public:
399 : enum IterationMode {
400 : kKeepMarking,
401 : kClearMarkbits,
402 : };
403 :
404 : // Visits black objects on a MemoryChunk until the Visitor returns false for
405 : // an object. If IterationMode::kClearMarkbits is passed, the mark bits and
406 : // slots of each successfully visited object are cleared.
407 : template <class Visitor>
408 104 : bool VisitBlackObjects(MemoryChunk* chunk, const MarkingState& state,
409 : Visitor* visitor, IterationMode iteration_mode);
410 :
411 : private:
412 : void RecomputeLiveBytes(MemoryChunk* chunk, const MarkingState& state);
413 : };
414 :
415 : enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
416 :
417 : // Base class for minor and full MC collectors.
418 : class MarkCompactCollectorBase {
419 : public:
420 59285 : virtual ~MarkCompactCollectorBase() {}
421 :
422 : // Note: Make sure to refer to the instances by their concrete collector
423 : // type to avoid vtable lookups on the marking state methods in hot paths.
424 : virtual MarkingState marking_state(HeapObject* object) const = 0;
425 : virtual MarkingState marking_state(MemoryChunk* chunk) const = 0;
426 :
427 : virtual void SetUp() = 0;
428 : virtual void TearDown() = 0;
429 : virtual void CollectGarbage() = 0;
430 :
431 : inline Heap* heap() const { return heap_; }
432 342400 : inline Isolate* isolate() { return heap()->isolate(); }
433 :
434 : protected:
435 60782 : explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
436 :
437 : virtual void MarkLiveObjects() = 0;
438 :
439 : // The number of parallel compaction tasks, including the main thread.
440 : int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
441 :
442 : Heap* heap_;
443 : };
444 :
445 : // Collector for young-generation only.
446 0 : class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
447 : public:
448 0 : explicit MinorMarkCompactCollector(Heap* heap)
449 0 : : MarkCompactCollectorBase(heap), marking_deque_(heap) {}
450 :
451 0 : MarkingState marking_state(HeapObject* object) const override {
452 0 : return MarkingState::External(object);
453 : }
454 :
455 0 : MarkingState marking_state(MemoryChunk* chunk) const override {
456 0 : return MarkingState::External(chunk);
457 : }
458 :
459 : void SetUp() override;
460 : void TearDown() override;
461 : void CollectGarbage() override;
462 :
463 : private:
464 : class RootMarkingVisitor;
465 :
466 0 : inline MarkingDeque* marking_deque() { return &marking_deque_; }
467 :
468 : V8_INLINE void MarkObject(HeapObject* obj);
469 : V8_INLINE void PushBlack(HeapObject* obj);
470 :
471 : SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
472 : void MarkLiveObjects() override;
473 : void ProcessMarkingDeque();
474 : void EmptyMarkingDeque();
475 :
476 : MarkingDeque marking_deque_;
477 :
478 : friend class StaticYoungGenerationMarkingVisitor;
479 : };
480 :
481 : // Collector for young and old generation.
482 237140 : class MarkCompactCollector final : public MarkCompactCollectorBase {
483 : public:
484 : class RootMarkingVisitor;
485 :
486 177855 : class Sweeper {
487 : public:
488 : class SweeperTask;
489 :
490 : enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
491 : enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
492 : enum ClearOldToNewSlotsMode {
493 : DO_NOT_CLEAR,
494 : CLEAR_REGULAR_SLOTS,
495 : CLEAR_TYPED_SLOTS
496 : };
497 :
498 : typedef std::deque<Page*> SweepingList;
499 : typedef List<Page*> SweptList;
500 :
501 : static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
502 : FreeSpaceTreatmentMode free_space_mode);
503 :
504 60782 : explicit Sweeper(Heap* heap)
505 : : heap_(heap),
506 : pending_sweeper_tasks_semaphore_(0),
507 : semaphore_counter_(0),
508 : sweeping_in_progress_(false),
509 607820 : num_sweeping_tasks_(0) {}
510 :
511 : bool sweeping_in_progress() { return sweeping_in_progress_; }
512 :
513 : void AddPage(AllocationSpace space, Page* page);
514 :
515 : int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
516 : int max_pages = 0);
517 : int ParallelSweepPage(Page* page, AllocationSpace identity);
518 :
519 : // After calling this function sweeping is considered to be in progress
520 : // and the main thread can sweep lazily, but the background sweeper tasks
521 : // are not running yet.
522 : void StartSweeping();
523 : void StartSweeperTasks();
524 : void EnsureCompleted();
525 : void EnsureNewSpaceCompleted();
526 : bool AreSweeperTasksRunning();
527 : void SweepOrWaitUntilSweepingCompleted(Page* page);
528 :
529 : void AddSweptPageSafe(PagedSpace* space, Page* page);
530 : Page* GetSweptPageSafe(PagedSpace* space);
531 :
532 : private:
533 : static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
534 :
535 : static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);
536 :
537 : template <typename Callback>
538 53317 : void ForAllSweepingSpaces(Callback callback) {
539 852172 : for (int i = 0; i < kAllocationSpaces; i++) {
540 852172 : callback(static_cast<AllocationSpace>(i));
541 : }
542 53317 : }
543 :
544 : Page* GetSweepingPageSafe(AllocationSpace space);
545 : void AddSweepingPageSafe(AllocationSpace space, Page* page);
546 :
547 : void PrepareToBeSweptPage(AllocationSpace space, Page* page);
548 :
549 : Heap* heap_;
550 : base::Semaphore pending_sweeper_tasks_semaphore_;
551 : // Counter is only used for waiting on the semaphore.
552 : intptr_t semaphore_counter_;
553 : base::Mutex mutex_;
554 : SweptList swept_list_[kAllocationSpaces];
555 : SweepingList sweeping_list_[kAllocationSpaces];
556 : bool sweeping_in_progress_;
557 : // The counter is maintained by the concurrent tasks themselves so that the
558 : // main thread can check for running tasks without querying the semaphore.
559 : base::AtomicNumber<intptr_t> num_sweeping_tasks_;
560 : };
561 :
562 : enum IterationMode {
563 : kKeepMarking,
564 : kClearMarkbits,
565 : };
566 :
567 : static void Initialize();
568 :
569 0 : MarkingState marking_state(HeapObject* object) const override {
570 0 : return MarkingState::Internal(object);
571 : }
572 :
573 0 : MarkingState marking_state(MemoryChunk* chunk) const override {
574 0 : return MarkingState::Internal(chunk);
575 : }
576 :
577 : void SetUp() override;
578 : void TearDown() override;
579 : // Performs a global garbage collection.
580 : void CollectGarbage() override;
581 :
582 : void CollectEvacuationCandidates(PagedSpace* space);
583 :
584 : void AddEvacuationCandidate(Page* p);
585 :
586 : // Prepares for GC by resetting relocation info in old and map spaces and
587 : // choosing spaces to compact.
588 : void Prepare();
589 :
590 : bool StartCompaction();
591 :
592 : void AbortCompaction();
593 :
594 13594 : CodeFlusher* code_flusher() { return code_flusher_; }
595 118101725 : inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
596 :
597 : INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
598 12772303 : return Page::FromAddress(reinterpret_cast<Address>(host))
599 12772303 : ->ShouldSkipEvacuationSlotRecording();
600 : }
601 :
602 : static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
603 : return Page::FromAddress(reinterpret_cast<Address>(obj))
604 : ->IsEvacuationCandidate();
605 : }
606 :
607 : void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
608 : void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
609 : void RecordCodeTargetPatch(Address pc, Code* target);
610 : INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
611 : INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
612 : Object* target));
613 : void RecordLiveSlotsOnPage(Page* page);
614 :
615 : void UpdateSlots(SlotsBuffer* buffer);
616 : void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
617 :
618 : void InvalidateCode(Code* code);
619 :
620 : void ClearMarkbits();
621 :
622 : bool is_compacting() const { return compacting_; }
623 :
624 : // Ensures that sweeping is finished.
625 : //
626 : // Note: Can only be called safely from main thread.
627 : void EnsureSweepingCompleted();
628 :
629 : // Help out in sweeping the corresponding space and refill memory that has
630 : // been regained.
631 : //
632 : // Note: Thread-safe.
633 : void SweepAndRefill(CompactionSpace* space);
634 :
635 : // Checks if sweeping is in progress right now on any space.
636 46353175 : bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }
637 :
638 106692 : void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
639 :
640 : bool evacuation() const { return evacuation_; }
641 :
642 422511137 : MarkingDeque* marking_deque() { return &marking_deque_; }
643 :
644 : Sweeper& sweeper() { return sweeper_; }
645 :
646 : #ifdef DEBUG
647 : // Checks whether a mark-compact collection is in progress.
648 : bool in_use() { return state_ > PREPARE_GC; }
649 : bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
650 : #endif
651 :
652 : #ifdef VERIFY_HEAP
653 : void VerifyValidStoreAndSlotsBufferEntries();
654 : void VerifyMarkbitsAreClean();
655 : static void VerifyMarkbitsAreClean(PagedSpace* space);
656 : static void VerifyMarkbitsAreClean(NewSpace* space);
657 : void VerifyWeakEmbeddedObjectsInCode();
658 : void VerifyOmittedMapChecks();
659 : #endif
660 :
661 : private:
662 : explicit MarkCompactCollector(Heap* heap);
663 :
664 : bool WillBeDeoptimized(Code* code);
665 :
666 : void ComputeEvacuationHeuristics(size_t area_size,
667 : int* target_fragmentation_percent,
668 : size_t* max_evacuated_bytes);
669 :
670 : void VisitAllObjects(HeapObjectVisitor* visitor);
671 :
672 : void RecordObjectStats();
673 :
674 : // Finishes GC, performs heap verification if enabled.
675 : void Finish();
676 :
677 : // Mark code objects that are active on the stack to prevent them
678 : // from being flushed.
679 : void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
680 :
681 : void PrepareForCodeFlushing();
682 :
683 : // Marking operations for objects reachable from roots.
684 : void MarkLiveObjects() override;
685 :
686 : // Pushes a black object onto the marking stack and accounts for live bytes.
687 : // Note that this assumes live bytes have not yet been counted.
688 : V8_INLINE void PushBlack(HeapObject* obj);
689 :
690 : // Unshifts a black object into the marking stack and accounts for live bytes.
691 : // Note that this assumes live bytes have already been counted.
692 : V8_INLINE void UnshiftBlack(HeapObject* obj);
693 :
694 : // Marks the object black and pushes it on the marking stack.
695 : // This is for non-incremental marking only.
696 : V8_INLINE void MarkObject(HeapObject* obj);
697 :
698 : // Mark the heap roots and all objects reachable from them.
699 : void MarkRoots(RootMarkingVisitor* visitor);
700 :
701 : // Mark the string table specially. References to internalized strings from
702 : // the string table are weak.
703 : void MarkStringTable(RootMarkingVisitor* visitor);
704 :
705 : // Mark objects reachable (transitively) from objects in the marking stack
706 : // or overflowed in the heap.
707 : void ProcessMarkingDeque();
708 :
709 : // Mark objects reachable (transitively) from objects in the marking stack
710 : // or overflowed in the heap. This respects references only considered in
711 : // the final atomic marking pause including the following:
712 : // - Processing of objects reachable through Harmony WeakMaps.
713 : // - Objects reachable due to host application logic like object groups,
714 : // implicit references' groups, or embedder heap tracing.
715 : void ProcessEphemeralMarking(bool only_process_harmony_weak_collections);
716 :
717 : // If the call-site of the top optimized code was not prepared for
718 : // deoptimization, then treat the maps in the code as strong pointers,
719 : // otherwise a map can die and deoptimize the code.
720 : void ProcessTopOptimizedFrame(RootMarkingVisitor* visitor);
721 :
722 : // Collects a list of dependent code from maps embedded in optimized code.
723 : DependentCode* DependentCodeListFromNonLiveMaps();
724 :
725 : // Mark objects reachable (transitively) from objects in the marking
726 : // stack. This function empties the marking stack, but may leave
727 : // overflowed objects in the heap, in which case the marking stack's
728 : // overflow flag will be set.
729 : void EmptyMarkingDeque();
730 :
731 : // Refill the marking stack with overflowed objects from the heap. This
732 : // function either leaves the marking stack full or clears the overflow
733 : // flag on the marking stack.
734 : void RefillMarkingDeque();
735 :
736 : // Helper methods for refilling the marking stack by discovering grey objects
737 : // on various pages of the heap. Used by {RefillMarkingDeque} only.
738 : template <class T>
739 : void DiscoverGreyObjectsWithIterator(T* it);
740 : void DiscoverGreyObjectsOnPage(MemoryChunk* p);
741 : void DiscoverGreyObjectsInSpace(PagedSpace* space);
742 : void DiscoverGreyObjectsInNewSpace();
743 :
744 : // Callback function for telling whether the object *p is an unmarked
745 : // heap object.
746 : static bool IsUnmarkedHeapObject(Object** p);
747 :
748 : // Clear non-live references in weak cells, transition and descriptor arrays,
749 : // and deoptimize dependent code of non-live maps.
750 : void ClearNonLiveReferences();
751 : void MarkDependentCodeForDeoptimization(DependentCode* list);
752 : // Find non-live targets of simple transitions in the given list. Clear
753 : // transitions to non-live targets and, if needed, trim descriptor arrays.
754 : void ClearSimpleMapTransitions(Object* non_live_map_list);
755 : void ClearSimpleMapTransition(Map* map, Map* dead_transition);
756 : // Compact every array in the global list of transition arrays and
757 : // trim the corresponding descriptor array if a transition target is non-live.
758 : void ClearFullMapTransitions();
759 : bool CompactTransitionArray(Map* map, TransitionArray* transitions,
760 : DescriptorArray* descriptors);
761 : void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
762 : void TrimEnumCache(Map* map, DescriptorArray* descriptors);
763 :
764 : // Mark all values associated with reachable keys in weak collections
765 : // encountered so far. This might push new objects or even new weak maps onto
766 : // the marking stack.
767 : void ProcessWeakCollections();
768 :
769 : // After all reachable objects have been marked, those weak map entries
770 : // with an unreachable key are removed from all encountered weak maps.
771 : // The linked list of all encountered weak maps is destroyed.
772 : void ClearWeakCollections();
773 :
774 : // We have to remove all encountered weak maps from the list of weak
775 : // collections when incremental marking is aborted.
776 : void AbortWeakCollections();
777 :
778 : void ClearWeakCells(Object** non_live_map_list,
779 : DependentCode** dependent_code_list);
780 : void AbortWeakCells();
781 :
782 : void AbortTransitionArrays();
783 :
784 : // Starts sweeping of spaces by contributing on the main thread and setting
785 : // up other pages for sweeping. Does not start sweeper tasks.
786 : void StartSweepSpaces();
787 : void StartSweepSpace(PagedSpace* space);
788 :
789 : void EvacuatePrologue();
790 : void EvacuateEpilogue();
791 : void EvacuatePagesInParallel();
792 :
793 : void EvacuateNewSpaceAndCandidates();
794 :
795 : void UpdatePointersAfterEvacuation();
796 :
797 : void ReleaseEvacuationCandidates();
798 :
799 : base::Semaphore page_parallel_job_semaphore_;
800 :
801 : #ifdef DEBUG
802 : enum CollectorState {
803 : IDLE,
804 : PREPARE_GC,
805 : MARK_LIVE_OBJECTS,
806 : SWEEP_SPACES,
807 : ENCODE_FORWARDING_ADDRESSES,
808 : UPDATE_POINTERS,
809 : RELOCATE_OBJECTS
810 : };
811 :
812 : // The current stage of the collector.
813 : CollectorState state_;
814 : #endif
815 :
816 : bool was_marked_incrementally_;
817 :
818 : bool evacuation_;
819 :
820 : // True if we are collecting slots to perform evacuation from evacuation
821 : // candidates.
822 : bool compacting_;
823 :
824 : bool black_allocation_;
825 :
826 : bool have_code_to_deoptimize_;
827 :
828 : MarkingDeque marking_deque_;
829 :
830 : CodeFlusher* code_flusher_;
831 :
832 : // Candidates for pages that should be evacuated.
833 : List<Page*> evacuation_candidates_;
834 : // Pages that are actually processed during evacuation.
835 : List<Page*> old_space_evacuation_pages_;
836 : List<Page*> new_space_evacuation_pages_;
837 :
838 : Sweeper sweeper_;
839 :
840 : friend class CodeMarkingVisitor;
841 : friend class Heap;
842 : friend class IncrementalMarkingMarkingVisitor;
843 : friend class MarkCompactMarkingVisitor;
844 : friend class MarkingVisitor;
845 : friend class RecordMigratedSlotVisitor;
846 : friend class SharedFunctionInfoMarkingVisitor;
847 : friend class StaticYoungGenerationMarkingVisitor;
848 : friend class StoreBuffer;
849 : };
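
// A minimal sketch of the sweeping-task accounting pattern declared in
// MarkCompactCollector::Sweeper above: each background task decrements an
// atomic counter and signals a semaphore when it finishes, so the main thread
// can either poll "are sweeper tasks running?" cheaply via the counter or
// block in EnsureCompleted() by consuming one semaphore signal per task. The
// ToySemaphore stands in for V8's base::Semaphore; all names and the use of
// std::thread are illustrative, not V8 API.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class ToySemaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    count_++;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    count_--;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

class ToySweeper {
 public:
  void StartSweeperTasks(int num_tasks) {
    num_sweeping_tasks_.store(num_tasks);
    semaphore_counter_ = num_tasks;
    for (int i = 0; i < num_tasks; i++) {
      tasks_.emplace_back([this] {
        // ... sweep pages here ...
        num_sweeping_tasks_.fetch_sub(1);  // maintained by the task itself
        pending_tasks_semaphore_.Signal();
      });
    }
  }

  // Cheap main-thread check that never touches the semaphore.
  bool AreSweeperTasksRunning() { return num_sweeping_tasks_.load() != 0; }

  // Blocking wait: consume one semaphore signal per started task, then join.
  void EnsureCompleted() {
    for (; semaphore_counter_ > 0; semaphore_counter_--) {
      pending_tasks_semaphore_.Wait();
    }
    for (auto& task : tasks_) task.join();
    tasks_.clear();
  }

 private:
  std::vector<std::thread> tasks_;
  std::atomic<int> num_sweeping_tasks_{0};
  ToySemaphore pending_tasks_semaphore_;
  int semaphore_counter_ = 0;  // only used for waiting on the semaphore
};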
850 :
851 : class EvacuationScope BASE_EMBEDDED {
852 : public:
853 : explicit EvacuationScope(MarkCompactCollector* collector)
854 : : collector_(collector) {
855 : collector_->set_evacuation(true);
856 : }
857 :
858 : ~EvacuationScope() { collector_->set_evacuation(false); }
859 :
860 : private:
861 : MarkCompactCollector* collector_;
862 : };
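
// EvacuationScope above is a plain RAII guard: constructing it sets the
// collector's evacuation flag and the destructor clears it again, even on
// early returns. A hedged usage sketch (the surrounding block is
// illustrative, not actual V8 code):
//
//   {
//     EvacuationScope evacuation_scope(collector);  // evacuation() == true
//     // ... evacuate pages and update pointers ...
//   }  // scope ends, evacuation() == false again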
863 :
864 : } // namespace internal
865 : } // namespace v8
866 :
867 : #endif // V8_HEAP_MARK_COMPACT_H_