Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/concurrent-marking.h"
6 :
7 : #include <stack>
8 : #include <unordered_map>
9 :
10 : #include "include/v8config.h"
11 : #include "src/base/template-utils.h"
12 : #include "src/heap/gc-tracer.h"
13 : #include "src/heap/heap-inl.h"
14 : #include "src/heap/heap.h"
15 : #include "src/heap/mark-compact-inl.h"
16 : #include "src/heap/mark-compact.h"
17 : #include "src/heap/marking.h"
18 : #include "src/heap/objects-visiting-inl.h"
19 : #include "src/heap/objects-visiting.h"
20 : #include "src/heap/worklist.h"
21 : #include "src/isolate.h"
22 : #include "src/objects/data-handler-inl.h"
23 : #include "src/objects/embedder-data-array-inl.h"
24 : #include "src/objects/hash-table-inl.h"
25 : #include "src/objects/slots-inl.h"
26 : #include "src/transitions-inl.h"
27 : #include "src/utils-inl.h"
28 : #include "src/utils.h"
29 : #include "src/v8.h"
30 :
31 : namespace v8 {
32 : namespace internal {
33 :
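// Marking state used by the concurrent marker. Mark bits are set atomically;
// live-byte counts are accumulated in a task-local MemoryChunkDataMap and
// merged into the main marking state once the task completes.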
34 : class ConcurrentMarkingState final
35 : : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
36 : public:
37 : explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
38 490641 : : memory_chunk_data_(memory_chunk_data) {}
39 :
40 3505477985 : Bitmap* bitmap(const MemoryChunk* chunk) {
41 : DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
42 : reinterpret_cast<intptr_t>(chunk),
43 : MemoryChunk::kMarkBitmapOffset);
44 3505477985 : return chunk->marking_bitmap_;
45 : }
46 :
47 487968033 : void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
48 974511035 : (*memory_chunk_data_)[chunk].live_bytes += by;
49 486543002 : }
50 :
51 : // The live_bytes and SetLiveBytes methods of the marking state are
52 : // not used by the concurrent marker.
53 :
54 : private:
55 : MemoryChunkDataMap* memory_chunk_data_;
56 : };
57 :
58 : // Helper class for storing in-object slot addresses and values.
59 : class SlotSnapshot {
60 : public:
61 124168701 : SlotSnapshot() : number_of_slots_(0) {}
62 : int number_of_slots() const { return number_of_slots_; }
63 374259990 : ObjectSlot slot(int i) const { return snapshot_[i].first; }
64 374259990 : Object value(int i) const { return snapshot_[i].second; }
65 62217775 : void clear() { number_of_slots_ = 0; }
66 : void add(ObjectSlot slot, Object value) {
67 378750161 : snapshot_[number_of_slots_++] = {slot, value};
68 : }
69 :
70 : private:
71 : static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
72 : int number_of_slots_;
73 : std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
74 : DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
75 : };
76 :
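// Visitor that traces the heap from background tasks. Newly discovered grey
// objects are pushed onto the shared marking worklist, while weak references,
// ephemerons and flushing candidates are deferred to the main-thread phases
// via WeakObjects.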
77 490516 : class ConcurrentMarkingVisitor final
78 : : public HeapVisitor<int, ConcurrentMarkingVisitor> {
79 : public:
80 : using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
81 :
82 : explicit ConcurrentMarkingVisitor(
83 : ConcurrentMarking::MarkingWorklist* shared,
84 : MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
85 : ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
86 : bool embedder_tracing_enabled, unsigned mark_compact_epoch,
87 : bool is_forced_gc)
88 : : shared_(shared, task_id),
89 : weak_objects_(weak_objects),
90 : embedder_objects_(embedder_objects, task_id),
91 : marking_state_(memory_chunk_data),
92 : memory_chunk_data_(memory_chunk_data),
93 : task_id_(task_id),
94 : embedder_tracing_enabled_(embedder_tracing_enabled),
95 : mark_compact_epoch_(mark_compact_epoch),
96 1962564 : is_forced_gc_(is_forced_gc) {}
97 :
98 : template <typename T>
99 : static V8_INLINE T Cast(HeapObject object) {
100 : return T::cast(object);
101 : }
102 :
103 473089341 : bool ShouldVisit(HeapObject object) {
104 948268679 : return marking_state_.GreyToBlack(object);
105 : }
106 :
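// The default JSObject visitation is disabled; JS objects go through the
// snapshotting helpers below, which copy the slots before marking so that
// concurrent mutations by the main thread are tolerated.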
107 : bool AllowDefaultJSObjectVisit() { return false; }
108 :
109 : template <typename THeapObjectSlot>
110 2607840617 : void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
111 : HeapObject heap_object) {
112 2607840617 : MarkObject(heap_object);
113 : MarkCompactCollector::RecordSlot(host, slot, heap_object);
114 2610032404 : }
115 :
116 : template <typename THeapObjectSlot>
117 78970839 : void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
118 : HeapObject heap_object) {
119 : #ifdef THREAD_SANITIZER
120 : // Perform a dummy acquire load to tell TSAN that there is no data race
121 : // in mark-bit initialization. See MemoryChunk::Initialize for the
122 : // corresponding release store.
123 : MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
124 : CHECK_NOT_NULL(chunk->synchronized_heap());
125 : #endif
126 79079245 : if (marking_state_.IsBlackOrGrey(heap_object)) {
127 : // Weak references with live values are directly processed here to
128 : // reduce the processing time of weak cells during the main GC
129 : // pause.
130 : MarkCompactCollector::RecordSlot(host, slot, heap_object);
131 : } else {
132 : // If we do not know about liveness of the value, we have to process
133 : // the reference when we know the liveness of the whole transitive
134 : // closure.
135 20462336 : weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
136 : }
137 79015304 : }
138 :
139 772024739 : void VisitPointers(HeapObject host, ObjectSlot start,
140 : ObjectSlot end) override {
141 : VisitPointersImpl(host, start, end);
142 768712879 : }
143 :
144 53615973 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
145 : MaybeObjectSlot end) override {
146 : VisitPointersImpl(host, start, end);
147 53403334 : }
148 :
149 : template <typename TSlot>
150 : V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
151 : using THeapObjectSlot = typename TSlot::THeapObjectSlot;
152 3990669306 : for (TSlot slot = start; slot < end; ++slot) {
153 3162593988 : typename TSlot::TObject object = slot.Relaxed_Load();
154 3164386258 : HeapObject heap_object;
155 3164386258 : if (object.GetHeapObjectIfStrong(&heap_object)) {
156 : // If the reference changes concurrently from strong to weak, the write
157 : // barrier will treat the weak reference as strong, so we won't miss the
158 : // weak reference.
159 2621817971 : ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
160 261970536 : } else if (TSlot::kCanBeWeak &&
161 : object.GetHeapObjectIfWeak(&heap_object)) {
162 78982132 : ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
163 : }
164 : }
165 : }
166 :
167 : // Weak list pointers should be ignored during marking. The lists are
168 : // reconstructed after GC.
169 44791679 : void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
170 44791679 : ObjectSlot end) final {}
171 :
172 4305946 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
173 : DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
174 : HeapObject object = rinfo->target_object();
175 4305595 : RecordRelocSlot(host, rinfo, object);
176 4303942 : if (!marking_state_.IsBlackOrGrey(object)) {
177 314090 : if (host->IsWeakObject(object)) {
178 : weak_objects_->weak_objects_in_code.Push(task_id_,
179 58156 : std::make_pair(object, host));
180 : } else {
181 256006 : MarkObject(object);
182 : }
183 : }
184 4304069 : }
185 :
186 502078 : void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
187 : DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
188 502090 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
189 502091 : RecordRelocSlot(host, rinfo, target);
190 502080 : MarkObject(target);
191 502065 : }
192 :
193 498747107 : void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
194 872820826 : for (int i = 0; i < snapshot.number_of_slots(); i++) {
195 : ObjectSlot slot = snapshot.slot(i);
196 374259990 : Object object = snapshot.value(i);
197 : DCHECK(!HasWeakHeapObjectTag(object));
198 381035268 : if (!object->IsHeapObject()) continue;
199 : HeapObject heap_object = HeapObject::cast(object);
200 368090602 : MarkObject(heap_object);
201 : MarkCompactCollector::RecordSlot(host, slot, heap_object);
202 : }
203 62150423 : }
204 :
205 : // ===========================================================================
206 : // JS object =================================================================
207 : // ===========================================================================
208 :
209 34668 : int VisitJSObject(Map map, JSObject object) {
210 34668 : return VisitJSObjectSubclass(map, object);
211 : }
212 :
213 12167922 : int VisitJSObjectFast(Map map, JSObject object) {
214 12158178 : return VisitJSObjectSubclassFast(map, object);
215 : }
216 :
217 22370 : int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
218 22370 : return VisitJSObjectSubclass(map, object);
219 : }
220 :
221 45 : int VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
222 45 : int size = VisitJSObjectSubclass(map, weak_ref);
223 45 : if (size == 0) {
224 : return 0;
225 : }
226 90 : if (weak_ref->target()->IsHeapObject()) {
227 90 : HeapObject target = HeapObject::cast(weak_ref->target());
228 45 : if (marking_state_.IsBlackOrGrey(target)) {
229 : // Record the slot inside the JSWeakRef, since the
230 : // VisitJSObjectSubclass above didn't visit it.
231 : ObjectSlot slot =
232 : HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
233 : MarkCompactCollector::RecordSlot(weak_ref, slot, target);
234 : } else {
235 : // JSWeakRef points to a potentially dead object. We have to process
236 : // it when we know the liveness of the whole transitive closure.
237 30 : weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
238 : }
239 : }
240 45 : return size;
241 : }
242 :
243 18 : int VisitWeakCell(Map map, WeakCell weak_cell) {
244 18 : if (!ShouldVisit(weak_cell)) return 0;
245 :
246 : int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
247 : VisitMapPointer(weak_cell, weak_cell->map_slot());
248 18 : WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
249 36 : if (weak_cell->target()->IsHeapObject()) {
250 36 : HeapObject target = HeapObject::cast(weak_cell->target());
251 18 : if (marking_state_.IsBlackOrGrey(target)) {
252 : // Record the slot inside the WeakCell, since the IterateBody above
253 : // didn't visit it.
254 : ObjectSlot slot =
255 : HeapObject::RawField(weak_cell, WeakCell::kTargetOffset);
256 : MarkCompactCollector::RecordSlot(weak_cell, slot, target);
257 : } else {
258 : // WeakCell points to a potentially dead object. We have to process
259 : // it when we know the liveness of the whole transitive closure.
260 10 : weak_objects_->weak_cells.Push(task_id_, weak_cell);
261 : }
262 : }
263 18 : return size;
264 : }
265 :
266 : // Some JS objects can carry back links to embedders that contain information
267 : // relevant to the garbage collectors.
268 :
269 76718 : int VisitJSApiObject(Map map, JSObject object) {
270 76718 : return VisitEmbedderTracingSubclass(map, object);
271 : }
272 :
273 237429 : int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
274 237429 : return VisitEmbedderTracingSubclass(map, object);
275 : }
276 :
277 22 : int VisitJSDataView(Map map, JSDataView object) {
278 22 : return VisitEmbedderTracingSubclass(map, object);
279 : }
280 :
281 61320 : int VisitJSTypedArray(Map map, JSTypedArray object) {
282 61320 : return VisitEmbedderTracingSubclass(map, object);
283 : }
284 :
285 : // ===========================================================================
286 : // Strings with pointers =====================================================
287 : // ===========================================================================
288 :
289 10227707 : int VisitConsString(Map map, ConsString object) {
290 10226340 : return VisitFullyWithSnapshot(map, object);
291 : }
292 :
293 51250 : int VisitSlicedString(Map map, SlicedString object) {
294 50781 : return VisitFullyWithSnapshot(map, object);
295 : }
296 :
297 33644 : int VisitThinString(Map map, ThinString object) {
298 33671 : return VisitFullyWithSnapshot(map, object);
299 : }
300 :
301 : // ===========================================================================
302 : // Strings without pointers ==================================================
303 : // ===========================================================================
304 :
305 42758731 : int VisitSeqOneByteString(Map map, SeqOneByteString object) {
306 42758731 : if (!ShouldVisit(object)) return 0;
307 : VisitMapPointer(object, object->map_slot());
308 : return SeqOneByteString::SizeFor(object->synchronized_length());
309 : }
310 :
311 42362811 : int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
312 42362811 : if (!ShouldVisit(object)) return 0;
313 : VisitMapPointer(object, object->map_slot());
314 : return SeqTwoByteString::SizeFor(object->synchronized_length());
315 : }
316 :
317 : // ===========================================================================
318 : // Fixed array object ========================================================
319 : // ===========================================================================
320 :
321 14783 : int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
322 : MemoryChunk* chunk) {
323 : // The concurrent marker can process larger chunks than the main thread
324 : // marker.
325 : const int kProgressBarScanningChunk =
326 : RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
327 : DCHECK(marking_state_.IsBlackOrGrey(object));
328 14783 : marking_state_.GreyToBlack(object);
329 : int size = FixedArray::BodyDescriptor::SizeOf(map, object);
330 : int start =
331 : Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
332 14782 : int end = Min(size, start + kProgressBarScanningChunk);
333 14782 : if (start < end) {
334 : VisitPointers(object, HeapObject::RawField(object, start),
335 14782 : HeapObject::RawField(object, end));
336 : chunk->set_progress_bar(end);
337 14783 : if (end < size) {
338 : // The object can be pushed back onto the marking worklist only after
339 : // the progress bar was updated.
340 13394 : shared_.Push(object);
341 : }
342 : }
343 14783 : return end - start;
344 : }
345 :
346 8985200 : int VisitFixedArray(Map map, FixedArray object) {
347 : // Arrays with the progress bar are not left-trimmable because they reside
348 : // in the large object space.
349 : MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
350 : return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
351 : ? VisitFixedArrayWithProgressBar(map, object, chunk)
352 8985200 : : VisitLeftTrimmableArray(map, object);
353 : }
354 :
355 85630 : int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
356 85630 : return VisitLeftTrimmableArray(map, object);
357 : }
358 :
359 : // ===========================================================================
360 : // Side-effectful visitation.
361 : // ===========================================================================
362 :
363 45174723 : int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
364 45174723 : if (!ShouldVisit(shared_info)) return 0;
365 :
366 : int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
367 : VisitMapPointer(shared_info, shared_info->map_slot());
368 : SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
369 44867699 : this);
370 :
371 : // If the SharedFunctionInfo has old bytecode, mark it as flushable,
372 : // otherwise visit the function data field strongly.
373 44980392 : if (shared_info->ShouldFlushBytecode()) {
374 247788 : weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
375 : } else {
376 : VisitPointer(shared_info, shared_info->RawField(
377 89611370 : SharedFunctionInfo::kFunctionDataOffset));
378 : }
379 45133777 : return size;
380 : }
381 :
382 1451585 : int VisitBytecodeArray(Map map, BytecodeArray object) {
383 1451585 : if (!ShouldVisit(object)) return 0;
384 1451647 : int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
385 : VisitMapPointer(object, object->map_slot());
386 1449869 : BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
387 1452399 : if (!is_forced_gc_) {
388 1185740 : object->MakeOlder();
389 : }
390 : return size;
391 : }
392 :
393 39288491 : int VisitJSFunction(Map map, JSFunction object) {
394 39288491 : int size = VisitJSObjectSubclass(map, object);
395 :
396 : // Check whether the JSFunction needs to be reset because its bytecode was flushed.
397 39318152 : if (object->NeedsResetDueToFlushedBytecode()) {
398 6800 : weak_objects_->flushed_js_functions.Push(task_id_, object);
399 : }
400 :
401 39170170 : return size;
402 : }
403 :
404 28505199 : int VisitMap(Map meta_map, Map map) {
405 28505199 : if (!ShouldVisit(map)) return 0;
406 : int size = Map::BodyDescriptor::SizeOf(meta_map, map);
407 28614564 : if (map->CanTransition()) {
408 : // Maps that can transition share their descriptor arrays and require
409 : // special visiting logic to avoid memory leaks.
410 : // Since descriptor arrays are potentially shared, ensure that only the
411 : // descriptors that belong to this map are marked. The first time a
412 : // non-empty descriptor array is marked, its header is also visited. The
413 : // slot holding the descriptor array will be implicitly recorded when the
414 : // pointer fields of this map are visited.
415 28445676 : DescriptorArray descriptors = map->synchronized_instance_descriptors();
416 28461225 : MarkDescriptorArrayBlack(descriptors);
417 28233718 : int number_of_own_descriptors = map->NumberOfOwnDescriptors();
418 28233718 : if (number_of_own_descriptors) {
419 : // It is possible that the concurrent marker observes the
420 : // number_of_own_descriptors out of sync with the descriptors. In that
421 : // case the marking write barrier for the descriptor array will ensure
422 : // that all required descriptors are marked. The concurrent marker
423 : // just needs to avoid crashing in that case. That's why we need the
424 : // std::min<int>() below.
425 : VisitDescriptors(descriptors,
426 : std::min<int>(number_of_own_descriptors,
427 42864782 : descriptors->number_of_descriptors()));
428 : }
429 : // Mark the pointer fields of the Map. Since the transitions array has
430 : // been marked already, it is fine that one of these fields contains a
431 : // pointer to it.
432 : }
433 28427970 : Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
434 28359551 : return size;
435 : }
436 :
437 21547305 : void VisitDescriptors(DescriptorArray descriptor_array,
438 : int number_of_own_descriptors) {
439 21547305 : int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
440 : int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
441 21547305 : mark_compact_epoch_, new_marked);
442 21560942 : if (old_marked < new_marked) {
443 : VisitPointers(
444 : descriptor_array,
445 13486522 : MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
446 13486522 : MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
447 : }
448 21548195 : }
449 :
450 141232 : int VisitDescriptorArray(Map map, DescriptorArray array) {
451 141232 : if (!ShouldVisit(array)) return 0;
452 : VisitMapPointer(array, array->map_slot());
453 : int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
454 : VisitPointers(array, array->GetFirstPointerSlot(),
455 85583 : array->GetDescriptorSlot(0));
456 85583 : VisitDescriptors(array, array->number_of_descriptors());
457 85611 : return size;
458 : }
459 :
460 494692 : int VisitTransitionArray(Map map, TransitionArray array) {
461 494692 : if (!ShouldVisit(array)) return 0;
462 : VisitMapPointer(array, array->map_slot());
463 : int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
464 : TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
465 494674 : weak_objects_->transition_arrays.Push(task_id_, array);
466 494676 : return size;
467 : }
468 :
469 31941 : int VisitJSWeakCollection(Map map, JSWeakCollection object) {
470 31941 : return VisitJSObjectSubclass(map, object);
471 : }
472 :
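// Ephemeron hash tables need special handling: every table is queued for the
// main-thread ephemeron phases, values are visited only when their key is
// already marked, and key/value pairs that are both unreachable so far are
// recorded as discovered ephemerons.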
473 31996 : int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
474 31996 : if (!ShouldVisit(table)) return 0;
475 32010 : weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
476 :
477 157758 : for (int i = 0; i < table->Capacity(); i++) {
478 : ObjectSlot key_slot =
479 : table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
480 126608 : HeapObject key = HeapObject::cast(table->KeyAt(i));
481 : MarkCompactCollector::RecordSlot(table, key_slot, key);
482 :
483 : ObjectSlot value_slot =
484 : table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
485 :
486 126623 : if (marking_state_.IsBlackOrGrey(key)) {
487 126516 : VisitPointer(table, value_slot);
488 :
489 : } else {
490 107 : Object value_obj = table->ValueAt(i);
491 :
492 107 : if (value_obj->IsHeapObject()) {
493 : HeapObject value = HeapObject::cast(value_obj);
494 : MarkCompactCollector::RecordSlot(table, value_slot, value);
495 :
496 : // Revisit ephemerons with both key and value unreachable at end
497 : // of the concurrent marking cycle.
498 1 : if (marking_state_.IsWhite(value)) {
499 : weak_objects_->discovered_ephemerons.Push(task_id_,
500 0 : Ephemeron{key, value});
501 : }
502 : }
503 : }
504 : }
505 :
506 31882 : return table->SizeFromMap(map);
507 : }
508 :
509 : // Implements ephemeron semantics: Marks value if key is already reachable.
510 : // Returns true if value was actually marked.
511 104 : bool VisitEphemeron(HeapObject key, HeapObject value) {
512 104 : if (marking_state_.IsBlackOrGrey(key)) {
513 50 : if (marking_state_.WhiteToGrey(value)) {
514 50 : shared_.Push(value);
515 50 : return true;
516 : }
517 :
518 54 : } else if (marking_state_.IsWhite(value)) {
519 54 : weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
520 : }
521 :
522 : return false;
523 : }
524 :
525 2946789837 : void MarkObject(HeapObject object) {
526 : #ifdef THREAD_SANITIZER
527 : // Perform a dummy acquire load to tell TSAN that there is no data race
528 : // in mark-bit initialization. See MemoryChunk::Initialize for the
529 : // corresponding release store.
530 : MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
531 : CHECK_NOT_NULL(chunk->synchronized_heap());
532 : #endif
533 2957301124 : if (marking_state_.WhiteToGrey(object)) {
534 313965460 : shared_.Push(object);
535 : }
536 2957108398 : }
537 :
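// Marks a descriptor array black immediately. Only the header slots are
// visited here; the descriptors themselves are visited incrementally per map
// via VisitDescriptors.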
538 28447421 : void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
539 : marking_state_.WhiteToGrey(descriptors);
540 56640088 : if (marking_state_.GreyToBlack(descriptors)) {
541 : VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
542 13387192 : descriptors->GetDescriptorSlot(0));
543 : }
544 28263775 : }
545 :
546 : private:
547 : // Helper class for collecting in-object slot addresses and values.
548 0 : class SlotSnapshottingVisitor final : public ObjectVisitor {
549 : public:
550 : explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
551 62217775 : : slot_snapshot_(slot_snapshot) {
552 : slot_snapshot_->clear();
553 : }
554 :
555 124383562 : void VisitPointers(HeapObject host, ObjectSlot start,
556 : ObjectSlot end) override {
557 627517285 : for (ObjectSlot p = start; p < end; ++p) {
558 : Object object = p.Relaxed_Load();
559 378750161 : slot_snapshot_->add(p, object);
560 : }
561 124383562 : }
562 :
563 0 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
564 : MaybeObjectSlot end) override {
565 : // This should never happen, because we don't use snapshotting for objects
566 : // which contain weak references.
567 0 : UNREACHABLE();
568 : }
569 :
570 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
571 : // This should never happen, because snapshotting is performed only on
572 : // JSObjects (and derived classes).
573 0 : UNREACHABLE();
574 : }
575 :
576 0 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
577 : // This should never happen, because snapshotting is performed only on
578 : // JSObjects (and derived classes).
579 0 : UNREACHABLE();
580 : }
581 :
582 45 : void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
583 : ObjectSlot end) override {
584 : DCHECK(host->IsWeakCell() || host->IsJSWeakRef());
585 45 : }
586 :
587 : private:
588 : SlotSnapshot* slot_snapshot_;
589 : };
590 :
591 : template <typename T>
592 : int VisitJSObjectSubclassFast(Map map, T object) {
593 : DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
594 : using TBodyDescriptor = typename T::FastBodyDescriptor;
595 12167922 : return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
596 : }
597 :
598 : template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
599 51921231 : int VisitJSObjectSubclass(Map map, T object) {
600 : int size = TBodyDescriptor::SizeOf(map, object);
601 51921231 : int used_size = map->UsedInstanceSize();
602 : DCHECK_LE(used_size, size);
603 : DCHECK_GE(used_size, T::kHeaderSize);
604 : return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
605 51913486 : used_size, size);
606 : }
607 :
608 : template <typename T>
609 375490 : int VisitEmbedderTracingSubclass(Map map, T object) {
610 : DCHECK(object->IsApiWrapper());
611 375490 : int size = VisitJSObjectSubclass(map, object);
612 375439 : if (size && embedder_tracing_enabled_) {
613 : // Success: The object needs to be processed for embedder references on
614 : // the main thread.
615 0 : embedder_objects_.Push(object);
616 : }
617 375439 : return size;
618 : }
619 :
620 : template <typename T>
621 9055746 : int VisitLeftTrimmableArray(Map map, T object) {
622 : // The synchronized_length() function checks that the length is a Smi.
623 : // This is not necessarily the case if the array is being left-trimmed.
624 9055746 : Object length = object->unchecked_synchronized_length();
625 9055746 : if (!ShouldVisit(object)) return 0;
626 : // The cached length must be the actual length as the array is not black.
627 : // Left trimming marks the array black before overwriting the length.
628 : DCHECK(length->IsSmi());
629 9080700 : int size = T::SizeFor(Smi::ToInt(length));
630 : VisitMapPointer(object, object->map_slot());
631 : T::BodyDescriptor::IterateBody(map, object, size, this);
632 : return size;
633 : }
634 :
635 : template <typename T>
636 : int VisitFullyWithSnapshot(Map map, T object) {
637 : using TBodyDescriptor = typename T::BodyDescriptor;
638 : int size = TBodyDescriptor::SizeOf(map, object);
639 : return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
640 10312601 : size);
641 : }
642 :
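// Takes a snapshot of the object's slots first and only processes it if the
// object is still grey afterwards; this keeps the visit consistent even if
// the mutator mutates the object concurrently.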
643 : template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
644 62221901 : int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
645 : const SlotSnapshot& snapshot =
646 62221901 : MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
647 62425591 : if (!ShouldVisit(object)) return 0;
648 62372084 : VisitPointersInSnapshot(object, snapshot);
649 39779808 : return size;
650 : }
651 :
652 : template <typename T, typename TBodyDescriptor>
653 62217775 : const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
654 62217775 : SlotSnapshottingVisitor visitor(&slot_snapshot_);
655 : visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
656 321187 : TBodyDescriptor::IterateBody(map, object, size, &visitor);
657 62424551 : return slot_snapshot_;
658 : }
659 :
660 4807581 : void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
661 : MarkCompactCollector::RecordRelocSlotInfo info =
662 4807581 : MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
663 4807474 : if (info.should_record) {
664 187730 : MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
665 187753 : if (!data.typed_slots) {
666 566 : data.typed_slots.reset(new TypedSlots());
667 : }
668 375506 : data.typed_slots->Insert(info.slot_type, info.offset);
669 : }
670 4807480 : }
671 :
672 : ConcurrentMarking::MarkingWorklist::View shared_;
673 : WeakObjects* weak_objects_;
674 : ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
675 : ConcurrentMarkingState marking_state_;
676 : MemoryChunkDataMap* memory_chunk_data_;
677 : int task_id_;
678 : SlotSnapshot slot_snapshot_;
679 : bool embedder_tracing_enabled_;
680 : const unsigned mark_compact_epoch_;
681 : bool is_forced_gc_;
682 : };
683 :
684 : // Strings can change maps due to conversion to thin or external strings.
685 : // Use unchecked casts to avoid data races in slow DCHECKs.
686 : template <>
687 10227900 : ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
688 10227900 : return ConsString::unchecked_cast(object);
689 : }
690 :
691 : template <>
692 51130 : SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
693 51130 : return SlicedString::unchecked_cast(object);
694 : }
695 :
696 : template <>
697 33659 : ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
698 33659 : return ThinString::unchecked_cast(object);
699 : }
700 :
701 : template <>
702 42775528 : SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
703 42775528 : return SeqOneByteString::unchecked_cast(object);
704 : }
705 :
706 : template <>
707 42431159 : SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
708 42431159 : return SeqTwoByteString::unchecked_cast(object);
709 : }
710 :
711 : // A FixedArray can become free space during left trimming.
712 : template <>
713 8985527 : FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
714 8985527 : return FixedArray::unchecked_cast(object);
715 : }
716 :
717 : class ConcurrentMarking::Task : public CancelableTask {
718 : public:
719 : Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
720 : TaskState* task_state, int task_id)
721 : : CancelableTask(isolate),
722 : concurrent_marking_(concurrent_marking),
723 : task_state_(task_state),
724 583226 : task_id_(task_id) {}
725 :
726 1165715 : ~Task() override = default;
727 :
728 : private:
729 : // v8::internal::CancelableTask overrides.
730 490688 : void RunInternal() override {
731 490688 : concurrent_marking_->Run(task_id_, task_state_);
732 490941 : }
733 :
734 : ConcurrentMarking* concurrent_marking_;
735 : TaskState* task_state_;
736 : int task_id_;
737 : DISALLOW_COPY_AND_ASSIGN(Task);
738 : };
739 :
740 61064 : ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
741 : MarkingWorklist* on_hold,
742 : WeakObjects* weak_objects,
743 : EmbedderTracingWorklist* embedder_objects)
744 : : heap_(heap),
745 : shared_(shared),
746 : on_hold_(on_hold),
747 : weak_objects_(weak_objects),
748 610640 : embedder_objects_(embedder_objects) {
749 : // The runtime flag should be set only if the compile time flag was set.
750 : #ifndef V8_CONCURRENT_MARKING
751 : CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
752 : #endif
753 61064 : }
754 :
755 490546 : void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
756 1447473274 : TRACE_BACKGROUND_GC(heap_->tracer(),
757 : GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
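// Check for preemption requests and publish marked bytes roughly every 64KB
// of marked memory or 1000 visited objects, whichever comes first.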
758 : size_t kBytesUntilInterruptCheck = 64 * KB;
759 : int kObjectsUntilInterruptCheck = 1000;
760 : ConcurrentMarkingVisitor visitor(
761 : shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
762 981282 : task_id, heap_->local_embedder_heap_tracer()->InUse(),
763 981282 : task_state->mark_compact_epoch, task_state->is_forced_gc);
764 : double time_ms;
765 : size_t marked_bytes = 0;
766 490641 : if (FLAG_trace_concurrent_marking) {
767 : heap_->isolate()->PrintWithTimestamp(
768 0 : "Starting concurrent marking task %d\n", task_id);
769 : }
770 : bool ephemeron_marked = false;
771 :
772 : {
773 : TimedScope scope(&time_ms);
774 :
775 : {
776 490300 : Ephemeron ephemeron;
777 :
778 980704 : while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
779 104 : if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
780 : ephemeron_marked = true;
781 : }
782 : }
783 : }
784 :
785 : bool done = false;
786 1635238 : while (!done) {
787 : size_t current_marked_bytes = 0;
788 : int objects_processed = 0;
789 963441286 : while (current_marked_bytes < kBytesUntilInterruptCheck &&
790 481720643 : objects_processed < kObjectsUntilInterruptCheck) {
791 481555900 : HeapObject object;
792 481555900 : if (!shared_->Pop(task_id, &object)) {
793 : done = true;
794 490327 : break;
795 : }
796 481673469 : objects_processed++;
797 : // The order of the two loads is important.
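// Objects that may lie in the currently used new-space allocation area (or
// are the pending large object) are deferred to the on_hold_ worklist, since
// the mutator may still be initializing them.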
798 481673469 : Address new_space_top = heap_->new_space()->original_top_acquire();
799 481673469 : Address new_space_limit = heap_->new_space()->original_limit_relaxed();
800 481673469 : Address new_large_object = heap_->new_lo_space()->pending_object();
801 : Address addr = object->address();
802 481673469 : if ((new_space_top <= addr && addr < new_space_limit) ||
803 : addr == new_large_object) {
804 8627992 : on_hold_->Push(task_id, object);
805 : } else {
806 : Map map = object->synchronized_map();
807 471451958 : current_marked_bytes += visitor.Visit(map, object);
808 : }
809 : }
810 655070 : marked_bytes += current_marked_bytes;
811 : base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
812 655070 : marked_bytes);
813 655070 : if (task_state->preemption_request) {
814 708 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
815 : "ConcurrentMarking::Run Preempted");
816 354 : break;
817 : }
818 : }
819 :
820 138142 : if (done) {
821 490254 : Ephemeron ephemeron;
822 :
823 980508 : while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
824 0 : if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
825 : ephemeron_marked = true;
826 : }
827 : }
828 : }
829 :
830 138197 : shared_->FlushToGlobal(task_id);
831 490313 : on_hold_->FlushToGlobal(task_id);
832 490324 : embedder_objects_->FlushToGlobal(task_id);
833 :
834 490228 : weak_objects_->transition_arrays.FlushToGlobal(task_id);
835 490289 : weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
836 490257 : weak_objects_->current_ephemerons.FlushToGlobal(task_id);
837 489956 : weak_objects_->next_ephemerons.FlushToGlobal(task_id);
838 489900 : weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
839 489685 : weak_objects_->weak_references.FlushToGlobal(task_id);
840 490055 : weak_objects_->js_weak_refs.FlushToGlobal(task_id);
841 490047 : weak_objects_->weak_cells.FlushToGlobal(task_id);
842 489918 : weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
843 489863 : weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
844 489842 : weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
845 489873 : base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
846 : total_marked_bytes_ += marked_bytes;
847 :
848 489873 : if (ephemeron_marked) {
849 : set_ephemeron_marked(true);
850 : }
851 :
852 : {
853 489873 : base::MutexGuard guard(&pending_lock_);
854 491039 : is_pending_[task_id] = false;
855 491039 : --pending_task_count_;
856 491039 : pending_condition_.NotifyAll();
857 : }
858 : }
859 490516 : if (FLAG_trace_concurrent_marking) {
860 : heap_->isolate()->PrintWithTimestamp(
861 : "Task %d concurrently marked %dKB in %.2fms\n", task_id,
862 0 : static_cast<int>(marked_bytes / KB), time_ms);
863 490243 : }
864 490979 : }
865 :
866 83318 : void ConcurrentMarking::ScheduleTasks() {
867 : DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
868 : DCHECK(!heap_->IsTearingDown());
869 83318 : base::MutexGuard guard(&pending_lock_);
870 : DCHECK_EQ(0, pending_task_count_);
871 83318 : if (task_count_ == 0) {
872 : static const int num_cores =
873 16475 : V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
874 : #if defined(V8_OS_MACOSX)
875 : // Mac OS X 10.11 and earlier seem to have trouble when doing concurrent
876 : // marking on competing hyper-threads (regresses Octane/Splay). As such,
877 : // only use num_cores/2, leaving one of those for the main thread.
878 : // TODO(ulan): Use all cores on Mac 10.12+.
879 : task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
880 : #else // defined(OS_MACOSX)
881 : #else // defined(V8_OS_MACOSX)
882 : // thread.
883 32950 : task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
884 : #endif // defined(OS_MACOSX)
885 : #endif // defined(V8_OS_MACOSX)
886 : // Task id 0 is for the main thread.
887 666544 : for (int i = 1; i <= task_count_; i++) {
888 583226 : if (!is_pending_[i]) {
889 583226 : if (FLAG_trace_concurrent_marking) {
890 : heap_->isolate()->PrintWithTimestamp(
891 583226 : "Scheduling concurrent marking task %d\n", i);
892 : }
893 583226 : task_state_[i].preemption_request = false;
894 : task_state_[i].mark_compact_epoch =
895 1166452 : heap_->mark_compact_collector()->epoch();
896 583226 : task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
897 583226 : is_pending_[i] = true;
898 583226 : ++pending_task_count_;
899 : auto task =
900 1166452 : base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
901 1166452 : cancelable_id_[i] = task->id();
902 1749678 : V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
903 : }
904 : }
905 : DCHECK_EQ(task_count_, pending_task_count_);
906 83318 : }
907 :
908 1290806 : void ConcurrentMarking::RescheduleTasksIfNeeded() {
909 : DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
910 1290806 : if (heap_->IsTearingDown()) return;
911 : {
912 1290806 : base::MutexGuard guard(&pending_lock_);
913 1290806 : if (pending_task_count_ > 0) return;
914 : }
915 3614995 : if (!shared_->IsGlobalPoolEmpty() ||
916 3558045 : !weak_objects_->current_ephemerons.IsEmpty() ||
917 1167021 : !weak_objects_->discovered_ephemerons.IsEmpty()) {
918 56969 : ScheduleTasks();
919 : }
920 : }
921 :
922 243859 : bool ConcurrentMarking::Stop(StopRequest stop_request) {
923 : DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
924 243859 : base::MutexGuard guard(&pending_lock_);
925 :
926 243859 : if (pending_task_count_ == 0) return false;
927 :
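// Unless the caller wants the tasks to run to completion (testing only), try
// to abort tasks that have not started yet; for PREEMPT_TASKS, ask already
// running tasks to stop at their next interrupt check.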
928 30854 : if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
929 : CancelableTaskManager* task_manager =
930 61670 : heap_->isolate()->cancelable_task_manager();
931 246680 : for (int i = 1; i <= task_count_; i++) {
932 215845 : if (is_pending_[i]) {
933 164090 : if (task_manager->TryAbort(cancelable_id_[i]) ==
934 : TryAbortResult::kTaskAborted) {
935 76460 : is_pending_[i] = false;
936 76460 : --pending_task_count_;
937 87630 : } else if (stop_request == StopRequest::PREEMPT_TASKS) {
938 415 : task_state_[i].preemption_request = true;
939 : }
940 : }
941 : }
942 : }
943 89902 : while (pending_task_count_ > 0) {
944 59048 : pending_condition_.Wait(&pending_lock_);
945 : }
946 : for (int i = 1; i <= task_count_; i++) {
947 : DCHECK(!is_pending_[i]);
948 : }
949 : return true;
950 : }
951 :
952 0 : bool ConcurrentMarking::IsStopped() {
953 0 : if (!FLAG_concurrent_marking) return true;
954 :
955 0 : base::MutexGuard guard(&pending_lock_);
956 0 : return pending_task_count_ == 0;
957 : }
958 :
959 221058 : void ConcurrentMarking::FlushMemoryChunkData(
960 : MajorNonAtomicMarkingState* marking_state) {
961 : DCHECK_EQ(pending_task_count_, 0);
962 1768275 : for (int i = 1; i <= task_count_; i++) {
963 1547217 : MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
964 3937604 : for (auto& pair : memory_chunk_data) {
965 : // ClearLiveness sets the live bytes to zero.
966 : // Pages with zero live bytes might already be unmapped.
967 843170 : MemoryChunk* memory_chunk = pair.first;
968 : MemoryChunkData& data = pair.second;
969 843170 : if (data.live_bytes) {
970 : marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
971 : }
972 843170 : if (data.typed_slots) {
973 : RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
974 1124 : std::move(data.typed_slots));
975 : }
976 : }
977 : memory_chunk_data.clear();
978 1547217 : task_state_[i].marked_bytes = 0;
979 : }
980 : total_marked_bytes_ = 0;
981 221058 : }
982 :
983 492785 : void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
984 2243394 : for (int i = 1; i <= task_count_; i++) {
985 1750609 : auto it = task_state_[i].memory_chunk_data.find(chunk);
986 1750609 : if (it != task_state_[i].memory_chunk_data.end()) {
987 2667 : it->second.live_bytes = 0;
988 : it->second.typed_slots.reset();
989 : }
990 : }
991 492785 : }
992 :
993 1062087 : size_t ConcurrentMarking::TotalMarkedBytes() {
994 : size_t result = 0;
995 8496696 : for (int i = 1; i <= task_count_; i++) {
996 : result +=
997 14869218 : base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
998 : }
999 1062087 : result += total_marked_bytes_;
1000 1062087 : return result;
1001 : }
1002 :
1003 23490 : ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
1004 : : concurrent_marking_(concurrent_marking),
1005 46261 : resume_on_exit_(FLAG_concurrent_marking &&
1006 : concurrent_marking_->Stop(
1007 69751 : ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
1008 : DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
1009 23490 : }
1010 :
1011 23490 : ConcurrentMarking::PauseScope::~PauseScope() {
1012 23490 : if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
1013 23490 : }
1014 :
1015 : } // namespace internal
1016 178779 : } // namespace v8
|