Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/concurrent-marking.h"
6 :
7 : #include <stack>
8 : #include <unordered_map>
9 :
10 : #include "include/v8config.h"
11 : #include "src/base/template-utils.h"
12 : #include "src/heap/gc-tracer.h"
13 : #include "src/heap/heap-inl.h"
14 : #include "src/heap/heap.h"
15 : #include "src/heap/mark-compact-inl.h"
16 : #include "src/heap/mark-compact.h"
17 : #include "src/heap/marking.h"
18 : #include "src/heap/objects-visiting-inl.h"
19 : #include "src/heap/objects-visiting.h"
20 : #include "src/heap/worklist.h"
21 : #include "src/isolate.h"
22 : #include "src/objects/data-handler-inl.h"
23 : #include "src/objects/embedder-data-array-inl.h"
24 : #include "src/objects/hash-table-inl.h"
25 : #include "src/objects/slots-inl.h"
26 : #include "src/transitions-inl.h"
27 : #include "src/utils-inl.h"
28 : #include "src/utils.h"
29 : #include "src/v8.h"
30 :
31 : namespace v8 {
32 : namespace internal {
33 :
34 : class ConcurrentMarkingState final
35 : : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
36 : public:
37 : explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
38 456088 : : memory_chunk_data_(memory_chunk_data) {}
39 :
40 : ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
41 : DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
42 : reinterpret_cast<intptr_t>(chunk),
43 : MemoryChunk::kMarkBitmapOffset);
44 : return chunk->marking_bitmap<AccessMode::ATOMIC>();
45 : }
46 :
47 423265610 : void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
48 842151971 : (*memory_chunk_data_)[chunk].live_bytes += by;
49 418886361 : }
50 :
51 : // The live_bytes and SetLiveBytes methods of the marking state are
52 : // not used by the concurrent marker.
53 :
54 : private:
55 : MemoryChunkDataMap* memory_chunk_data_;
56 : };
57 :
58 : // Helper class for storing in-object slot addresses and values.
59 : class SlotSnapshot {
60 : public:
61 114750913 : SlotSnapshot() : number_of_slots_(0) {}
62 : int number_of_slots() const { return number_of_slots_; }
63 337392323 : ObjectSlot slot(int i) const { return snapshot_[i].first; }
64 337392323 : Object value(int i) const { return snapshot_[i].second; }
65 56929058 : void clear() { number_of_slots_ = 0; }
66 : void add(ObjectSlot slot, Object value) {
67 336867517 : snapshot_[number_of_slots_++] = {slot, value};
68 : }
69 :
70 : private:
71 : static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
72 : int number_of_slots_;
73 : std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
74 : DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
75 : };
76 :
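// A minimal sketch of the snapshot-then-visit pattern used by the visitor
// below, written against the standard library only; Slot, SimpleSlotSnapshot,
// and TakeSnapshot are hypothetical names, not V8 API. The idea: copy every
// slot once with a relaxed load, then mark from that consistent copy even if
// the mutator keeps rewriting the object concurrently.
#include <array>
#include <atomic>
#include <cstddef>
#include <utility>

using Slot = std::atomic<void*>;

class SimpleSlotSnapshot {
 public:
  void Clear() { size_ = 0; }
  void Add(Slot* slot, void* value) { entries_[size_++] = {slot, value}; }
  std::size_t size() const { return size_; }
  std::pair<Slot*, void*> entry(std::size_t i) const { return entries_[i]; }

 private:
  static constexpr std::size_t kMaxSlots = 256;  // arbitrary bound
  std::size_t size_ = 0;
  std::array<std::pair<Slot*, void*>, kMaxSlots> entries_;
};

// Record the current value of every slot; the caller then visits the
// snapshot instead of the live object.
void TakeSnapshot(Slot* begin, Slot* end, SimpleSlotSnapshot* snapshot) {
  snapshot->Clear();
  for (Slot* p = begin; p < end; ++p) {
    snapshot->Add(p, p->load(std::memory_order_relaxed));
  }
}
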
77 912544 : class ConcurrentMarkingVisitor final
78 : : public HeapVisitor<int, ConcurrentMarkingVisitor> {
79 : public:
80 : using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
81 :
82 : explicit ConcurrentMarkingVisitor(
83 : ConcurrentMarking::MarkingWorklist* shared,
84 : MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
85 : ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
86 : bool embedder_tracing_enabled, unsigned mark_compact_epoch,
87 : bool is_forced_gc)
88 : : shared_(shared, task_id),
89 : weak_objects_(weak_objects),
90 : embedder_objects_(embedder_objects, task_id),
91 : marking_state_(memory_chunk_data),
92 : memory_chunk_data_(memory_chunk_data),
93 : task_id_(task_id),
94 : embedder_tracing_enabled_(embedder_tracing_enabled),
95 : mark_compact_epoch_(mark_compact_epoch),
96 1824352 : is_forced_gc_(is_forced_gc) {
97 : // It is not safe to access flags from the concurrent marking visitor,
98 : // so set the bytecode flush mode based on the flags here.
99 456088 : bytecode_flush_mode_ = Heap::GetBytecodeFlushMode();
100 : }
101 :
102 : template <typename T>
103 : static V8_INLINE T Cast(HeapObject object) {
104 : return T::cast(object);
105 : }
106 :
107 400834922 : bool ShouldVisit(HeapObject object) {
108 807647633 : return marking_state_.GreyToBlack(object);
109 : }
110 :
111 : bool AllowDefaultJSObjectVisit() { return false; }
112 :
113 : template <typename THeapObjectSlot>
114 2036126987 : void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
115 : HeapObject heap_object) {
116 2036126987 : MarkObject(heap_object);
117 : MarkCompactCollector::RecordSlot(host, slot, heap_object);
118 2038914869 : }
119 :
120 : template <typename THeapObjectSlot>
121 63613140 : void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
122 : HeapObject heap_object) {
123 : #ifdef THREAD_SANITIZER
124 : // Perform a dummy acquire load to tell TSAN that there is no data race
125 : // in mark-bit initialization. See MemoryChunk::Initialize for the
126 : // corresponding release store.
127 : MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
128 : CHECK_NOT_NULL(chunk->synchronized_heap());
129 : #endif
130 63613140 : if (marking_state_.IsBlackOrGrey(heap_object)) {
131 : // Weak references with live values are directly processed here to
132 : // reduce the processing time of weak cells during the main GC
133 : // pause.
134 : MarkCompactCollector::RecordSlot(host, slot, heap_object);
135 : } else {
136 : // If we do not know the liveness of the value, we have to process
137 : // the reference once the liveness of the whole transitive closure is
138 : // known.
139 10994059 : weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
140 : }
141 63616976 : }
142 :
143 643851574 : void VisitPointers(HeapObject host, ObjectSlot start,
144 : ObjectSlot end) override {
145 : VisitPointersImpl(host, start, end);
146 644483638 : }
147 :
148 46206640 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
149 : MaybeObjectSlot end) override {
150 : VisitPointersImpl(host, start, end);
151 45677319 : }
152 :
153 : template <typename TSlot>
154 : V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
155 : using THeapObjectSlot = typename TSlot::THeapObjectSlot;
156 4012221520 : for (TSlot slot = start; slot < end; ++slot) {
157 2675331898 : typename TSlot::TObject object = slot.Relaxed_Load();
158 2680145270 : HeapObject heap_object;
159 2680145270 : if (object.GetHeapObjectIfStrong(&heap_object)) {
160 : // If the reference changes concurrently from strong to weak, the write
161 : // barrier will treat the weak reference as strong, so we won't miss the
162 : // weak reference.
163 2064261121 : ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
164 214078233 : } else if (TSlot::kCanBeWeak &&
165 : object.GetHeapObjectIfWeak(&heap_object)) {
166 63445769 : ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
167 : }
168 : }
169 : }
170 :
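// A minimal sketch of the strong/weak dispatch in VisitPointersImpl above,
// assuming a hypothetical two-bit tag; the real encoding is defined by
// MaybeObject and is not restated here.
#include <atomic>
#include <cstdint>

struct TaggedValue {
  std::uintptr_t bits;
  bool IsStrong() const { return (bits & 0x3) == 0x1; }  // hypothetical tag
  bool IsWeak() const { return (bits & 0x3) == 0x3; }    // hypothetical tag
  std::uintptr_t ToObject() const { return bits & ~std::uintptr_t{0x3}; }
};

template <typename StrongFn, typename WeakFn>
void VisitSlots(const std::atomic<std::uintptr_t>* begin,
                const std::atomic<std::uintptr_t>* end, StrongFn strong,
                WeakFn weak) {
  for (const std::atomic<std::uintptr_t>* slot = begin; slot < end; ++slot) {
    TaggedValue value{slot->load(std::memory_order_relaxed)};
    if (value.IsStrong()) {
      strong(value.ToObject());  // mark the target and record the slot
    } else if (value.IsWeak()) {
      weak(value.ToObject());  // defer unless the target is already marked
    }
    // Untagged values (e.g. small integers) fall through and are skipped.
  }
}
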
171 : // Weak list pointers should be ignored during marking. The lists are
172 : // reconstructed after GC.
173 37110248 : void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
174 37110248 : ObjectSlot end) final {}
175 :
176 2168131 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
177 : DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
178 : HeapObject object = rinfo->target_object();
179 2168131 : RecordRelocSlot(host, rinfo, object);
180 2164250 : if (!marking_state_.IsBlackOrGrey(object)) {
181 254696 : if (host->IsWeakObject(object)) {
182 58571 : weak_objects_->weak_objects_in_code.Push(task_id_,
183 58571 : std::make_pair(object, host));
184 : } else {
185 196104 : MarkObject(object);
186 : }
187 : }
188 2164287 : }
189 :
190 423638 : void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
191 : DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
192 423638 : Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
193 423625 : RecordRelocSlot(host, rinfo, target);
194 423617 : MarkObject(target);
195 423606 : }
196 :
197 56624412 : void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
198 731317248 : for (int i = 0; i < snapshot.number_of_slots(); i++) {
199 : ObjectSlot slot = snapshot.slot(i);
200 : Object object = snapshot.value(i);
201 : DCHECK(!HasWeakHeapObjectTag(object));
202 337392323 : if (!object->IsHeapObject()) continue;
203 : HeapObject heap_object = HeapObject::cast(object);
204 331567241 : MarkObject(heap_object);
205 : MarkCompactCollector::RecordSlot(host, slot, heap_object);
206 : }
207 56578507 : }
208 :
209 : // ===========================================================================
210 : // JS object =================================================================
211 : // ===========================================================================
212 :
213 26201 : int VisitJSObject(Map map, JSObject object) {
214 26201 : return VisitJSObjectSubclass(map, object);
215 : }
216 :
217 11573849 : int VisitJSObjectFast(Map map, JSObject object) {
218 11590638 : return VisitJSObjectSubclassFast(map, object);
219 : }
220 :
221 10340 : int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
222 10340 : return VisitJSObjectSubclass(map, object);
223 : }
224 :
225 43 : int VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
226 43 : int size = VisitJSObjectSubclass(map, weak_ref);
227 43 : if (size == 0) {
228 : return 0;
229 : }
230 43 : if (weak_ref->target()->IsHeapObject()) {
231 : HeapObject target = HeapObject::cast(weak_ref->target());
232 43 : if (marking_state_.IsBlackOrGrey(target)) {
233 : // Record the slot inside the JSWeakRef, since the
234 : // VisitJSObjectSubclass above didn't visit it.
235 : ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
236 : MarkCompactCollector::RecordSlot(weak_ref, slot, target);
237 : } else {
238 : // The JSWeakRef points to a potentially dead object. We have to process
239 : // it once we know the liveness of the whole transitive closure.
240 28 : weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
241 : }
242 : }
243 : return size;
244 : }
245 :
246 27 : int VisitWeakCell(Map map, WeakCell weak_cell) {
247 27 : if (!ShouldVisit(weak_cell)) return 0;
248 :
249 : int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
250 : VisitMapPointer(weak_cell, weak_cell->map_slot());
251 27 : WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
252 27 : if (weak_cell->target()->IsHeapObject()) {
253 : HeapObject target = HeapObject::cast(weak_cell->target());
254 27 : if (marking_state_.IsBlackOrGrey(target)) {
255 : // Record the slot inside the WeakCell, since the IterateBody above
256 : // didn't visit it.
257 : ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
258 : MarkCompactCollector::RecordSlot(weak_cell, slot, target);
259 : } else {
260 : // The WeakCell points to a potentially dead object. We have to process
261 : // it once we know the liveness of the whole transitive closure.
262 17 : weak_objects_->weak_cells.Push(task_id_, weak_cell);
263 : }
264 : }
265 : return size;
266 : }
267 :
268 : // Some JS objects can carry back links to embedders that contain information
269 : // relevant to the garbage collectors.
270 :
271 71519 : int VisitJSApiObject(Map map, JSObject object) {
272 71519 : return VisitEmbedderTracingSubclass(map, object);
273 : }
274 :
275 43043 : int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
276 43043 : return VisitEmbedderTracingSubclass(map, object);
277 : }
278 :
279 33 : int VisitJSDataView(Map map, JSDataView object) {
280 33 : return VisitEmbedderTracingSubclass(map, object);
281 : }
282 :
283 60335 : int VisitJSTypedArray(Map map, JSTypedArray object) {
284 60335 : return VisitEmbedderTracingSubclass(map, object);
285 : }
286 :
287 : // ===========================================================================
288 : // Strings with pointers =====================================================
289 : // ===========================================================================
290 :
291 10144539 : int VisitConsString(Map map, ConsString object) {
292 10152802 : return VisitFullyWithSnapshot(map, object);
293 : }
294 :
295 61427 : int VisitSlicedString(Map map, SlicedString object) {
296 61482 : return VisitFullyWithSnapshot(map, object);
297 : }
298 :
299 57754 : int VisitThinString(Map map, ThinString object) {
300 57796 : return VisitFullyWithSnapshot(map, object);
301 : }
302 :
303 : // ===========================================================================
304 : // Strings without pointers ==================================================
305 : // ===========================================================================
306 :
307 37757023 : int VisitSeqOneByteString(Map map, SeqOneByteString object) {
308 37757023 : if (!ShouldVisit(object)) return 0;
309 : VisitMapPointer(object, object->map_slot());
310 : return SeqOneByteString::SizeFor(object->synchronized_length());
311 : }
312 :
313 36788551 : int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
314 36788551 : if (!ShouldVisit(object)) return 0;
315 : VisitMapPointer(object, object->map_slot());
316 : return SeqTwoByteString::SizeFor(object->synchronized_length());
317 : }
318 :
319 : // ===========================================================================
320 : // Fixed array object ========================================================
321 : // ===========================================================================
322 :
323 61779 : int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
324 : MemoryChunk* chunk) {
325 : // The concurrent marker can process larger chunks than the main thread
326 : // marker.
327 : const int kProgressBarScanningChunk =
328 : RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
329 : DCHECK(marking_state_.IsBlackOrGrey(object));
330 61779 : marking_state_.GreyToBlack(object);
331 : int size = FixedArray::BodyDescriptor::SizeOf(map, object);
332 : size_t current_progress_bar = chunk->ProgressBar();
333 61777 : if (current_progress_bar == 0) {
334 : // Try to move the progress bar forward to the start offset. This solves the
335 : // problem of not being able to observe a progress bar reset when
336 : // processing the first kProgressBarScanningChunk.
337 7473 : if (!chunk->TrySetProgressBar(0,
338 : FixedArray::BodyDescriptor::kStartOffset))
339 : return 0;
340 : current_progress_bar = FixedArray::BodyDescriptor::kStartOffset;
341 : }
342 61779 : int start = static_cast<int>(current_progress_bar);
343 61779 : int end = Min(size, start + kProgressBarScanningChunk);
344 61779 : if (start < end) {
345 61732 : VisitPointers(object, object.RawField(start), object.RawField(end));
346 : // Setting the progress bar can fail if the object that is currently
347 : // scanned is also revisited. In this case, there may be two tasks racing
348 : // on the progress counter. The loser can bail out because the progress
349 : // bar is reset before the tasks race on the object.
350 61732 : if (chunk->TrySetProgressBar(current_progress_bar, end) && (end < size)) {
351 : // The object can be pushed back onto the marking worklist only after
352 : // the progress bar has been updated.
353 : shared_.Push(object);
354 : }
355 : }
356 61778 : return end - start;
357 : }
358 :
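// A minimal sketch of chunked scanning driven by an atomic progress counter,
// as in VisitFixedArrayWithProgressBar above; BigArray and ScanChunk are
// hypothetical names, not V8 API.
#include <algorithm>
#include <atomic>
#include <cstddef>

struct BigArray {
  std::atomic<std::size_t> progress{0};  // next element index to scan
  std::size_t length = 0;
};

// Scans at most chunk_size elements and advances the shared counter. The
// caller re-queues the array while the counter has not reached the end, so
// successive calls cover the array in disjoint chunks.
std::size_t ScanChunk(BigArray* array, std::size_t chunk_size,
                      void (*visit)(BigArray*, std::size_t, std::size_t)) {
  std::size_t start = array->progress.load(std::memory_order_relaxed);
  std::size_t end = std::min(array->length, start + chunk_size);
  if (start >= end) return 0;
  visit(array, start, end);
  // Two tasks may race here; compare_exchange lets only one of them advance
  // the counter, mirroring TrySetProgressBar.
  array->progress.compare_exchange_strong(start, end,
                                          std::memory_order_relaxed);
  return end - start;
}
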
359 7507129 : int VisitFixedArray(Map map, FixedArray object) {
360 : // Arrays with the progress bar are not left-trimmable because they reside
361 : // in the large object space.
362 : MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
363 : return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
364 : ? VisitFixedArrayWithProgressBar(map, object, chunk)
365 7507129 : : VisitLeftTrimmableArray(map, object);
366 : }
367 :
368 : int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
369 88747 : return VisitLeftTrimmableArray(map, object);
370 : }
371 :
372 : // ===========================================================================
373 : // Side-effectful visitation.
374 : // ===========================================================================
375 :
376 36998694 : int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
377 36998694 : if (!ShouldVisit(shared_info)) return 0;
378 :
379 : int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
380 : VisitMapPointer(shared_info, shared_info->map_slot());
381 : SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
382 37207743 : this);
383 :
384 : // If the SharedFunctionInfo has old bytecode, mark it as flushable,
385 : // otherwise visit the function data field strongly.
386 36406376 : if (shared_info->ShouldFlushBytecode(bytecode_flush_mode_)) {
387 201120 : weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
388 : } else {
389 : VisitPointer(shared_info, shared_info->RawField(
390 36298226 : SharedFunctionInfo::kFunctionDataOffset));
391 : }
392 : return size;
393 : }
394 :
395 1392045 : int VisitBytecodeArray(Map map, BytecodeArray object) {
396 1392045 : if (!ShouldVisit(object)) return 0;
397 : int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
398 : VisitMapPointer(object, object->map_slot());
399 1392921 : BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
400 1396416 : if (!is_forced_gc_) {
401 1117608 : object->MakeOlder();
402 : }
403 : return size;
404 : }
405 :
406 34963628 : int VisitJSFunction(Map map, JSFunction object) {
407 34963628 : int size = VisitJSObjectSubclass(map, object);
408 :
409 : // Check if the JSFunction needs reset due to bytecode being flushed.
410 69630800 : if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
411 34793407 : object->NeedsResetDueToFlushedBytecode()) {
412 6428 : weak_objects_->flushed_js_functions.Push(task_id_, object);
413 : }
414 :
415 34837391 : return size;
416 : }
417 :
418 26057854 : int VisitMap(Map meta_map, Map map) {
419 26057854 : if (!ShouldVisit(map)) return 0;
420 : int size = Map::BodyDescriptor::SizeOf(meta_map, map);
421 26561308 : if (map->CanTransition()) {
422 : // Maps that can transition share their descriptor arrays and require
423 : // special visiting logic to avoid memory leaks.
424 : // Since descriptor arrays are potentially shared, ensure that only the
425 : // descriptors that belong to this map are marked. The first time a
426 : // non-empty descriptor array is marked, its header is also visited. The
427 : // slot holding the descriptor array will be implicitly recorded when the
428 : // pointer fields of this map are visited.
429 : DescriptorArray descriptors = map->synchronized_instance_descriptors();
430 26434850 : MarkDescriptorArrayBlack(descriptors);
431 26285083 : int number_of_own_descriptors = map->NumberOfOwnDescriptors();
432 26285083 : if (number_of_own_descriptors) {
433 : // It is possible that the concurrent marker observes the
434 : // number_of_own_descriptors out of sync with the descriptors. In that
435 : // case the marking write barrier for the descriptor array will ensure
436 : // that all required descriptors are marked. The concurrent marker
437 : // just should avoid crashing in that case. That's why we need the
438 : // std::min<int>() below.
439 20186771 : VisitDescriptors(descriptors,
440 : std::min<int>(number_of_own_descriptors,
441 40373542 : descriptors->number_of_descriptors()));
442 : }
443 : // Mark the pointer fields of the Map. Since the transitions array has
444 : // been marked already, it is fine that one of these fields contains a
445 : // pointer to it.
446 : }
447 26347985 : Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
448 25795615 : return size;
449 : }
450 :
451 20294678 : void VisitDescriptors(DescriptorArray descriptor_array,
452 : int number_of_own_descriptors) {
453 20294678 : int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
454 20294678 : int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
455 20294678 : mark_compact_epoch_, new_marked);
456 20304648 : if (old_marked < new_marked) {
457 12006579 : VisitPointers(
458 : descriptor_array,
459 12076233 : MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
460 12076233 : MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
461 : }
462 20234994 : }
463 :
464 168023 : int VisitDescriptorArray(Map map, DescriptorArray array) {
465 168023 : if (!ShouldVisit(array)) return 0;
466 : VisitMapPointer(array, array->map_slot());
467 : int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
468 : VisitPointers(array, array->GetFirstPointerSlot(),
469 99379 : array->GetDescriptorSlot(0));
470 99300 : VisitDescriptors(array, array->number_of_descriptors());
471 99270 : return size;
472 : }
473 :
474 438872 : int VisitTransitionArray(Map map, TransitionArray array) {
475 438872 : if (!ShouldVisit(array)) return 0;
476 : VisitMapPointer(array, array->map_slot());
477 : int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
478 : TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
479 438709 : weak_objects_->transition_arrays.Push(task_id_, array);
480 438752 : return size;
481 : }
482 :
483 37580 : int VisitJSWeakCollection(Map map, JSWeakCollection object) {
484 37580 : return VisitJSObjectSubclass(map, object);
485 : }
486 :
487 36531 : int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
488 36531 : if (!ShouldVisit(table)) return 0;
489 37698 : weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
490 :
491 310719 : for (int i = 0; i < table->Capacity(); i++) {
492 : ObjectSlot key_slot =
493 : table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
494 : HeapObject key = HeapObject::cast(table->KeyAt(i));
495 : MarkCompactCollector::RecordSlot(table, key_slot, key);
496 :
497 : ObjectSlot value_slot =
498 : table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
499 :
500 138112 : if (marking_state_.IsBlackOrGrey(key)) {
501 137949 : VisitPointer(table, value_slot);
502 :
503 : } else {
504 163 : Object value_obj = table->ValueAt(i);
505 :
506 163 : if (value_obj->IsHeapObject()) {
507 : HeapObject value = HeapObject::cast(value_obj);
508 : MarkCompactCollector::RecordSlot(table, value_slot, value);
509 :
510 : // Revisit ephemerons with both key and value unreachable at end
511 : // of concurrent marking cycle.
512 68 : if (marking_state_.IsWhite(value)) {
513 60 : weak_objects_->discovered_ephemerons.Push(task_id_,
514 120 : Ephemeron{key, value});
515 : }
516 : }
517 : }
518 : }
519 :
520 35854 : return table->SizeFromMap(map);
521 : }
522 :
523 : // Implements ephemeron semantics: Marks value if key is already reachable.
524 : // Returns true if value was actually marked.
525 148 : bool ProcessEphemeron(HeapObject key, HeapObject value) {
526 148 : if (marking_state_.IsBlackOrGrey(key)) {
527 51 : if (marking_state_.WhiteToGrey(value)) {
528 : shared_.Push(value);
529 51 : return true;
530 : }
531 :
532 97 : } else if (marking_state_.IsWhite(value)) {
533 97 : weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
534 : }
535 :
536 : return false;
537 : }
538 :
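// A minimal sketch of the ephemeron rule implemented by ProcessEphemeron
// above, using standard containers; EphemeronMarker is a hypothetical type,
// not V8 API. The value is marked only once the key is known to be live;
// otherwise the pair is deferred and retried in a later fixpoint pass.
#include <deque>
#include <unordered_set>
#include <utility>

struct EphemeronMarker {
  std::unordered_set<const void*> marked;
  std::deque<const void*> worklist;
  std::deque<std::pair<const void*, const void*>> deferred;

  // Returns true if the value was newly marked because of a live key.
  bool ProcessEphemeron(const void* key, const void* value) {
    if (marked.count(key)) {
      if (marked.insert(value).second) {
        worklist.push_back(value);
        return true;
      }
    } else if (!marked.count(value)) {
      deferred.push_back({key, value});
    }
    return false;
  }
};
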
539 2307738923 : void MarkObject(HeapObject object) {
540 : #ifdef THREAD_SANITIZER
541 : // Perform a dummy acquire load to tell TSAN that there is no data race
542 : // in mark-bit initialization. See MemoryChunk::Initialize for the
543 : // corresponding release store.
544 : MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
545 : CHECK_NOT_NULL(chunk->synchronized_heap());
546 : #endif
547 2317198499 : if (marking_state_.WhiteToGrey(object)) {
548 : shared_.Push(object);
549 : }
550 2315967953 : }
551 :
552 26380164 : void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
553 : marking_state_.WhiteToGrey(descriptors);
554 52845160 : if (marking_state_.GreyToBlack(descriptors)) {
555 : VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
556 11943456 : descriptors->GetDescriptorSlot(0));
557 : }
558 26408921 : }
559 :
560 : private:
561 : // Helper class for collecting in-object slot addresses and values.
562 57464040 : class SlotSnapshottingVisitor final : public ObjectVisitor {
563 : public:
564 : explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
565 56929058 : : slot_snapshot_(slot_snapshot) {
566 : slot_snapshot_->clear();
567 : }
568 :
569 112966253 : void VisitPointers(HeapObject host, ObjectSlot start,
570 : ObjectSlot end) override {
571 562800023 : for (ObjectSlot p = start; p < end; ++p) {
572 : Object object = p.Relaxed_Load();
573 336867517 : slot_snapshot_->add(p, object);
574 : }
575 112966253 : }
576 :
577 0 : void VisitPointers(HeapObject host, MaybeObjectSlot start,
578 : MaybeObjectSlot end) override {
579 : // This should never happen, because we don't use snapshotting for objects
580 : // which contain weak references.
581 0 : UNREACHABLE();
582 : }
583 :
584 0 : void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
585 : // This should never happen, because snapshotting is performed only on
586 : // JSObjects (and derived classes).
587 0 : UNREACHABLE();
588 : }
589 :
590 0 : void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
591 : // This should never happen, because snapshotting is performed only on
592 : // JSObjects (and derived classes).
593 0 : UNREACHABLE();
594 : }
595 :
596 43 : void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
597 : ObjectSlot end) override {
598 : DCHECK(host->IsWeakCell() || host->IsJSWeakRef());
599 43 : }
600 :
601 : private:
602 : SlotSnapshot* slot_snapshot_;
603 : };
604 :
605 : template <typename T>
606 : int VisitJSObjectSubclassFast(Map map, T object) {
607 : DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
608 : using TBodyDescriptor = typename T::FastBodyDescriptor;
609 11573849 : return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
610 : }
611 :
612 : template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
613 46766516 : int VisitJSObjectSubclass(Map map, T object) {
614 : int size = TBodyDescriptor::SizeOf(map, object);
615 46766516 : int used_size = map->UsedInstanceSize();
616 : DCHECK_LE(used_size, size);
617 : DCHECK_GE(used_size, T::kHeaderSize);
618 : return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
619 46709477 : used_size, size);
620 : }
621 :
622 : template <typename T>
623 174898 : int VisitEmbedderTracingSubclass(Map map, T object) {
624 : DCHECK(object->IsApiWrapper());
625 174898 : int size = VisitJSObjectSubclass(map, object);
626 175116 : if (size && embedder_tracing_enabled_) {
627 : // Success: The object needs to be processed for embedder references on
628 : // the main thread.
629 : embedder_objects_.Push(object);
630 : }
631 175116 : return size;
632 : }
633 :
634 : template <typename T>
635 7533465 : int VisitLeftTrimmableArray(Map map, T object) {
636 : // The synchronized_length() function checks that the length is a Smi.
637 : // This is not necessarily the case if the array is being left-trimmed.
638 : Object length = object->unchecked_synchronized_length();
639 7533465 : if (!ShouldVisit(object)) return 0;
640 : // The cached length must be the actual length as the array is not black.
641 : // Left trimming marks the array black before over-writing the length.
642 : DCHECK(length->IsSmi());
643 : int size = T::SizeFor(Smi::ToInt(length));
644 : VisitMapPointer(object, object->map_slot());
645 : T::BodyDescriptor::IterateBody(map, object, size, this);
646 7384950 : return size;
647 : }
648 :
649 : template <typename T>
650 : int VisitFullyWithSnapshot(Map map, T object) {
651 : using TBodyDescriptor = typename T::BodyDescriptor;
652 : int size = TBodyDescriptor::SizeOf(map, object);
653 : return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
654 10263720 : size);
655 : }
656 :
657 : template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
658 56954616 : int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
659 : const SlotSnapshot& snapshot =
660 56954616 : MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
661 57460471 : if (!ShouldVisit(object)) return 0;
662 57295579 : VisitPointersInSnapshot(object, snapshot);
663 35033029 : return size;
664 : }
665 :
666 : template <typename T, typename TBodyDescriptor>
667 56929058 : const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
668 56929058 : SlotSnapshottingVisitor visitor(&slot_snapshot_);
669 : visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
670 113818 : TBodyDescriptor::IterateBody(map, object, size, &visitor);
671 57464040 : return slot_snapshot_;
672 : }
673 :
674 2591397 : void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
675 : MarkCompactCollector::RecordRelocSlotInfo info =
676 2591397 : MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
677 2588292 : if (info.should_record) {
678 49287 : MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
679 49273 : if (!data.typed_slots) {
680 470 : data.typed_slots.reset(new TypedSlots());
681 : }
682 98546 : data.typed_slots->Insert(info.slot_type, info.offset);
683 : }
684 2588284 : }
685 :
686 : ConcurrentMarking::MarkingWorklist::View shared_;
687 : WeakObjects* weak_objects_;
688 : ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
689 : ConcurrentMarkingState marking_state_;
690 : MemoryChunkDataMap* memory_chunk_data_;
691 : int task_id_;
692 : SlotSnapshot slot_snapshot_;
693 : bool embedder_tracing_enabled_;
694 : const unsigned mark_compact_epoch_;
695 : bool is_forced_gc_;
696 : BytecodeFlushMode bytecode_flush_mode_;
697 : };
698 :
699 : // Strings can change maps due to conversion to thin or external strings.
700 : // Use an unchecked cast to avoid a data race in slow DCHECKs.
701 : template <>
702 0 : ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
703 0 : return ConsString::unchecked_cast(object);
704 : }
705 :
706 : template <>
707 0 : SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
708 0 : return SlicedString::unchecked_cast(object);
709 : }
710 :
711 : template <>
712 0 : ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
713 0 : return ThinString::unchecked_cast(object);
714 : }
715 :
716 : template <>
717 0 : SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
718 0 : return SeqOneByteString::unchecked_cast(object);
719 : }
720 :
721 : template <>
722 0 : SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
723 0 : return SeqTwoByteString::unchecked_cast(object);
724 : }
725 :
726 : // Fixed array can become a free space during left trimming.
727 : template <>
728 0 : FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
729 0 : return FixedArray::unchecked_cast(object);
730 : }
731 :
732 : class ConcurrentMarking::Task : public CancelableTask {
733 : public:
734 : Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
735 : TaskState* task_state, int task_id)
736 : : CancelableTask(isolate),
737 : concurrent_marking_(concurrent_marking),
738 : task_state_(task_state),
739 560994 : task_id_(task_id) {}
740 :
741 1121150 : ~Task() override = default;
742 :
743 : private:
744 : // v8::internal::CancelableTask overrides.
745 456063 : void RunInternal() override {
746 456063 : concurrent_marking_->Run(task_id_, task_state_);
747 456551 : }
748 :
749 : ConcurrentMarking* concurrent_marking_;
750 : TaskState* task_state_;
751 : int task_id_;
752 : DISALLOW_COPY_AND_ASSIGN(Task);
753 : };
754 :
755 62456 : ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
756 : MarkingWorklist* on_hold,
757 : WeakObjects* weak_objects,
758 : EmbedderTracingWorklist* embedder_objects)
759 : : heap_(heap),
760 : shared_(shared),
761 : on_hold_(on_hold),
762 : weak_objects_(weak_objects),
763 624560 : embedder_objects_(embedder_objects) {
764 : // The runtime flag should be set only if the compile-time flag was set.
765 : #ifndef V8_CONCURRENT_MARKING
766 : CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
767 : #endif
768 62457 : }
769 :
770 455839 : void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
771 2280092 : TRACE_BACKGROUND_GC(heap_->tracer(),
772 : GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
773 : size_t kBytesUntilInterruptCheck = 64 * KB;
774 : int kObjectsUntilInterruptCheck = 1000;
775 : ConcurrentMarkingVisitor visitor(
776 912176 : shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
777 456088 : task_id, heap_->local_embedder_heap_tracer()->InUse(),
778 912176 : task_state->mark_compact_epoch, task_state->is_forced_gc);
779 : double time_ms;
780 : size_t marked_bytes = 0;
781 456088 : if (FLAG_trace_concurrent_marking) {
782 : heap_->isolate()->PrintWithTimestamp(
783 0 : "Starting concurrent marking task %d\n", task_id);
784 : }
785 : bool ephemeron_marked = false;
786 :
787 : {
788 : TimedScope scope(&time_ms);
789 :
790 : {
791 455769 : Ephemeron ephemeron;
792 :
793 455857 : while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
794 88 : if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
795 : ephemeron_marked = true;
796 : }
797 : }
798 : }
799 :
800 : bool done = false;
801 1669521 : while (!done) {
802 : size_t current_marked_bytes = 0;
803 : int objects_processed = 0;
804 1224018906 : while (current_marked_bytes < kBytesUntilInterruptCheck &&
805 408317245 : objects_processed < kObjectsUntilInterruptCheck) {
806 407558895 : HeapObject object;
807 407558895 : if (!shared_->Pop(task_id, &object)) {
808 : done = true;
809 455619 : break;
810 : }
811 407420738 : objects_processed++;
812 : // The order of the two loads is important.
813 407420738 : Address new_space_top = heap_->new_space()->original_top_acquire();
814 407420738 : Address new_space_limit = heap_->new_space()->original_limit_relaxed();
815 407420738 : Address new_large_object = heap_->new_lo_space()->pending_object();
816 : Address addr = object->address();
817 407420738 : if ((new_space_top <= addr && addr < new_space_limit) ||
818 : addr == new_large_object) {
819 0 : on_hold_->Push(task_id, object);
820 : } else {
821 : Map map = object->synchronized_map();
822 400178114 : current_marked_bytes += visitor.Visit(map, object);
823 : }
824 : }
825 1213969 : marked_bytes += current_marked_bytes;
826 1213969 : base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
827 : marked_bytes);
828 1213969 : if (task_state->preemption_request) {
829 1236 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
830 : "ConcurrentMarking::Run Preempted");
831 : break;
832 : }
833 : }
834 :
835 737104 : if (done) {
836 455449 : Ephemeron ephemeron;
837 :
838 455509 : while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
839 60 : if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
840 : ephemeron_marked = true;
841 : }
842 : }
843 : }
844 :
845 737275 : shared_->FlushToGlobal(task_id);
846 455847 : on_hold_->FlushToGlobal(task_id);
847 455851 : embedder_objects_->FlushToGlobal(task_id);
848 :
849 455820 : weak_objects_->transition_arrays.FlushToGlobal(task_id);
850 455880 : weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
851 455896 : weak_objects_->current_ephemerons.FlushToGlobal(task_id);
852 455713 : weak_objects_->next_ephemerons.FlushToGlobal(task_id);
853 455678 : weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
854 455518 : weak_objects_->weak_references.FlushToGlobal(task_id);
855 455747 : weak_objects_->js_weak_refs.FlushToGlobal(task_id);
856 455781 : weak_objects_->weak_cells.FlushToGlobal(task_id);
857 455721 : weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
858 455836 : weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
859 455766 : weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
860 455712 : base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
861 : total_marked_bytes_ += marked_bytes;
862 :
863 455712 : if (ephemeron_marked) {
864 : set_ephemeron_marked(true);
865 : }
866 :
867 : {
868 455712 : base::MutexGuard guard(&pending_lock_);
869 456695 : is_pending_[task_id] = false;
870 456695 : --pending_task_count_;
871 456695 : pending_condition_.NotifyAll();
872 : }
873 : }
874 456272 : if (FLAG_trace_concurrent_marking) {
875 0 : heap_->isolate()->PrintWithTimestamp(
876 : "Task %d concurrently marked %dKB in %.2fms\n", task_id,
877 0 : static_cast<int>(marked_bytes / KB), time_ms);
878 : }
879 456644 : }
880 :
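// A minimal sketch of the budgeted, preemptible drain loop used by Run()
// above; DrainWithBudget and the Worklist interface are hypothetical, not
// V8 API. Worklist must provide value_type and bool Pop(value_type*), and
// visit must return the visited object's size in bytes.
#include <atomic>
#include <cstddef>

template <typename Worklist, typename Visit>
std::size_t DrainWithBudget(Worklist* worklist, Visit visit,
                            const std::atomic<bool>* preempt,
                            std::size_t bytes_per_check,
                            int objects_per_check) {
  std::size_t total = 0;
  bool done = false;
  while (!done) {
    std::size_t chunk = 0;
    int objects = 0;
    while (chunk < bytes_per_check && objects < objects_per_check) {
      typename Worklist::value_type object;
      if (!worklist->Pop(&object)) {
        done = true;
        break;
      }
      objects++;
      chunk += visit(object);
    }
    total += chunk;
    // Check for preemption only between budgets, like the loop above.
    if (preempt->load(std::memory_order_relaxed)) break;
  }
  return total;
}
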
881 80142 : void ConcurrentMarking::ScheduleTasks() {
882 : DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
883 : DCHECK(!heap_->IsTearingDown());
884 80142 : base::MutexGuard guard(&pending_lock_);
885 : DCHECK_EQ(0, pending_task_count_);
886 80142 : if (task_count_ == 0) {
887 : static const int num_cores =
888 16777 : V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
889 : #if defined(V8_OS_MACOSX)
890 : // Mac OS X 10.11 and prior seem to have trouble when doing concurrent
891 : // marking on competing hyper-threads (regresses Octane/Splay). As such,
892 : // only use num_cores/2, leaving one of those for the main thread.
893 : // TODO(ulan): Use all cores on Mac 10.12+.
894 : task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
895 : #else // defined(V8_OS_MACOSX)
896 : // On other platforms use all logical cores, leaving one for the main
897 : // thread.
898 33554 : task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
899 : #endif // defined(V8_OS_MACOSX)
900 : }
901 : // Task id 0 is for the main thread.
902 641136 : for (int i = 1; i <= task_count_; i++) {
903 560994 : if (!is_pending_[i]) {
904 560994 : if (FLAG_trace_concurrent_marking) {
905 0 : heap_->isolate()->PrintWithTimestamp(
906 0 : "Scheduling concurrent marking task %d\n", i);
907 : }
908 560994 : task_state_[i].preemption_request = false;
909 560994 : task_state_[i].mark_compact_epoch =
910 1121988 : heap_->mark_compact_collector()->epoch();
911 560994 : task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
912 560994 : is_pending_[i] = true;
913 560994 : ++pending_task_count_;
914 : auto task =
915 1121988 : base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
916 1121988 : cancelable_id_[i] = task->id();
917 1682982 : V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
918 : }
919 : }
920 : DCHECK_EQ(task_count_, pending_task_count_);
921 80142 : }
922 :
923 1633431 : void ConcurrentMarking::RescheduleTasksIfNeeded() {
924 : DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
925 1633431 : if (heap_->IsTearingDown()) return;
926 : {
927 1633431 : base::MutexGuard guard(&pending_lock_);
928 1633431 : if (pending_task_count_ > 0) return;
929 : }
930 4660905 : if (!shared_->IsGlobalPoolEmpty() ||
931 4607847 : !weak_objects_->current_ephemerons.IsEmpty() ||
932 1518251 : !weak_objects_->discovered_ephemerons.IsEmpty()) {
933 53078 : ScheduleTasks();
934 : }
935 : }
936 :
937 229435 : bool ConcurrentMarking::Stop(StopRequest stop_request) {
938 : DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
939 229435 : base::MutexGuard guard(&pending_lock_);
940 :
941 229435 : if (pending_task_count_ == 0) return false;
942 :
943 35080 : if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
944 : CancelableTaskManager* task_manager =
945 35060 : heap_->isolate()->cancelable_task_manager();
946 525900 : for (int i = 1; i <= task_count_; i++) {
947 245420 : if (is_pending_[i]) {
948 182287 : if (task_manager->TryAbort(cancelable_id_[i]) ==
949 : TryAbortResult::kTaskAborted) {
950 89123 : is_pending_[i] = false;
951 89123 : --pending_task_count_;
952 93164 : } else if (stop_request == StopRequest::PREEMPT_TASKS) {
953 489 : task_state_[i].preemption_request = true;
954 : }
955 : }
956 : }
957 : }
958 65519 : while (pending_task_count_ > 0) {
959 65519 : pending_condition_.Wait(&pending_lock_);
960 : }
961 : for (int i = 1; i <= task_count_; i++) {
962 : DCHECK(!is_pending_[i]);
963 : }
964 : return true;
965 : }
966 :
967 0 : bool ConcurrentMarking::IsStopped() {
968 0 : if (!FLAG_concurrent_marking) return true;
969 :
970 0 : base::MutexGuard guard(&pending_lock_);
971 0 : return pending_task_count_ == 0;
972 : }
973 :
974 204174 : void ConcurrentMarking::FlushMemoryChunkData(
975 : MajorNonAtomicMarkingState* marking_state) {
976 : DCHECK_EQ(pending_task_count_, 0);
977 3062022 : for (int i = 1; i <= task_count_; i++) {
978 1428924 : MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
979 2368956 : for (auto& pair : memory_chunk_data) {
980 : // ClearLiveness sets the live bytes to zero.
981 : // Pages with zero live bytes might already be unmapped.
982 940032 : MemoryChunk* memory_chunk = pair.first;
983 : MemoryChunkData& data = pair.second;
984 940032 : if (data.live_bytes) {
985 : marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
986 : }
987 940032 : if (data.typed_slots) {
988 468 : RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
989 468 : std::move(data.typed_slots));
990 : }
991 : }
992 : memory_chunk_data.clear();
993 1428924 : task_state_[i].marked_bytes = 0;
994 : }
995 : total_marked_bytes_ = 0;
996 204174 : }
997 :
998 1110926 : void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
999 9044222 : for (int i = 1; i <= task_count_; i++) {
1000 : auto it = task_state_[i].memory_chunk_data.find(chunk);
1001 3966648 : if (it != task_state_[i].memory_chunk_data.end()) {
1002 8817 : it->second.live_bytes = 0;
1003 : it->second.typed_slots.reset();
1004 : }
1005 : }
1006 1110926 : }
1007 :
1008 1086617 : size_t ConcurrentMarking::TotalMarkedBytes() {
1009 : size_t result = 0;
1010 16299255 : for (int i = 1; i <= task_count_; i++) {
1011 : result +=
1012 15212638 : base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
1013 : }
1014 1086617 : result += total_marked_bytes_;
1015 1086617 : return result;
1016 : }
1017 :
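// A minimal sketch of the relaxed per-task byte accounting behind
// TotalMarkedBytes(); TaskCounters is a hypothetical type, not V8 API. While
// tasks run, the sum is only approximate; it becomes exact once the tasks
// have flushed their counters and joined.
#include <atomic>
#include <cstddef>

struct TaskCounters {
  static constexpr int kMaxTasks = 8;  // arbitrary bound for the sketch
  std::atomic<std::size_t> marked_bytes[kMaxTasks];
  std::atomic<std::size_t> finished_total;

  TaskCounters() : finished_total(0) {
    for (auto& bytes : marked_bytes) bytes.store(0, std::memory_order_relaxed);
  }

  std::size_t Total() const {
    std::size_t result = finished_total.load(std::memory_order_relaxed);
    for (const auto& bytes : marked_bytes) {
      result += bytes.load(std::memory_order_relaxed);
    }
    return result;
  }
};
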
1018 26098 : ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
1019 : : concurrent_marking_(concurrent_marking),
1020 51329 : resume_on_exit_(FLAG_concurrent_marking &&
1021 25231 : concurrent_marking_->Stop(
1022 52196 : ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
1023 : DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
1024 26098 : }
1025 :
1026 52196 : ConcurrentMarking::PauseScope::~PauseScope() {
1027 26098 : if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
1028 26098 : }
1029 :
1030 : } // namespace internal
1031 122036 : } // namespace v8
|