// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_

#include "src/heap/scavenger.h"

#include "src/heap/incremental-marking-inl.h"
#include "src/heap/local-allocator-inl.h"
#include "src/objects-inl.h"
#include "src/objects/map.h"
#include "src/objects/slots-inl.h"

namespace v8 {
namespace internal {

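// The View methods below give each scavenging task a handle on the shared
// promotion list; they forward to the underlying PromotionList, supplying
// this task's id.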
void Scavenger::PromotionList::View::PushRegularObject(HeapObject object,
                                                       int size) {
  promotion_list_->PushRegularObject(task_id_, object, size);
}

void Scavenger::PromotionList::View::PushLargeObject(HeapObject object, Map map,
                                                     int size) {
  promotion_list_->PushLargeObject(task_id_, object, map, size);
}

bool Scavenger::PromotionList::View::IsEmpty() {
  return promotion_list_->IsEmpty();
}

size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
  return promotion_list_->LocalPushSegmentSize(task_id_);
}

bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
  return promotion_list_->Pop(task_id_, entry);
}

bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
  return promotion_list_->IsGlobalPoolEmpty();
}

bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
  return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
}

void Scavenger::PromotionList::PushRegularObject(int task_id, HeapObject object,
                                                 int size) {
  regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
}

void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject object,
                                               Map map, int size) {
  large_object_promotion_list_.Push(task_id, {object, map, size});
}

bool Scavenger::PromotionList::IsEmpty() {
  return regular_object_promotion_list_.IsEmpty() &&
         large_object_promotion_list_.IsEmpty();
}

size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
  return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
         large_object_promotion_list_.LocalPushSegmentSize(task_id);
}

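// Regular entries carry only (object, size); the map is re-read from the
// already-copied object on pop. Large-object entries carry the map
// explicitly, since HandleLargeObject below overwrites a large object's map
// slot with a self-forwarding pointer. Regular objects are drained first.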
bool Scavenger::PromotionList::Pop(int task_id,
                                   struct PromotionListEntry* entry) {
  ObjectAndSize regular_object;
  if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
    entry->heap_object = regular_object.first;
    entry->size = regular_object.second;
    entry->map = entry->heap_object->map();
    return true;
  }
  return large_object_promotion_list_.Pop(task_id, entry);
}

bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
  return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
         large_object_promotion_list_.IsGlobalPoolEmpty();
}

bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
  // Threshold for when to prioritize processing of the promotion list. Right
  // now we only look at the regular object list.
  const int kProcessPromotionListThreshold =
      kRegularObjectPromotionListSegmentSize / 2;
  return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
}

void Scavenger::PageMemoryFence(MaybeObject object) {
#ifdef THREAD_SANITIZER
  // Perform a dummy acquire load to tell TSAN that there is no data race
  // with page initialization.
  HeapObject heap_object;
  if (object->GetHeapObject(&heap_object)) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
  }
#endif
}

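// Copies the object's payload into target and then publishes the move by
// atomically swapping the source map word for a forwarding pointer. If the
// CAS loses, another task has already migrated the object and the caller
// must discard the copy made here.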
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
                              int size) {
  // Copy the content of source to target.
  target->set_map_word(MapWord::FromMap(map));
  heap()->CopyBlock(target->address() + kTaggedSize,
                    source->address() + kTaggedSize, size - kTaggedSize);

  Object old = source->map_slot().Release_CompareAndSwap(
      map, MapWord::FromForwardingAddress(target).ToMap());
  if (old != map) {
    // Another task migrated the object.
    return false;
  }

  if (V8_UNLIKELY(is_logging_)) {
    heap()->OnMoveEvent(target, source, size);
  }

  if (is_incremental_marking_) {
    heap()->incremental_marking()->TransferColor(source, target);
  }
  heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
  return true;
}

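// Tries to copy the object into to-space. If this task loses the migration
// race, the speculative allocation is released again and the slot is updated
// to the forwarding address installed by the winning task.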
template <typename THeapObjectSlot>
CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
    Map map, THeapObjectSlot slot, HeapObject object, int object_size,
    ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation =
      allocator_.Allocate(NEW_SPACE, object_size, alignment);

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(NEW_SPACE, target, object_size);
      MapWord map_word = object->synchronized_map_word();
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromPage(*slot));
      return Heap::InToPage(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);
    if (object_fields == ObjectFields::kMaybePointers) {
      copied_list_.Push(ObjectAndSize(target, object_size));
    }
    copied_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}

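// Same protocol as SemiSpaceCopyObject, but the object is moved to the old
// generation. Survivors that may contain pointer fields are queued on the
// promotion list so their slots can be visited later.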
template <typename THeapObjectSlot>
CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
                                              HeapObject object,
                                              int object_size,
                                              ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation =
      allocator_.Allocate(OLD_SPACE, object_size, alignment);

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(OLD_SPACE, target, object_size);
      MapWord map_word = object->synchronized_map_word();
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromPage(*slot));
      return Heap::InToPage(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);
    if (object_fields == ObjectFields::kMaybePointers) {
      promotion_list_.PushRegularObject(target, object_size);
    }
    promoted_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}

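// A slot needs to stay in the remembered set only while it still points into
// the young generation.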
SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
    CopyAndForwardResult result) {
  DCHECK_NE(CopyAndForwardResult::FAILURE, result);
  return result == CopyAndForwardResult::SUCCESS_YOUNG_GENERATION ? KEEP_SLOT
                                                                  : REMOVE_SLOT;
}

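// Objects in the young large object space are not copied. Instead they are
// promoted in place: the map slot is atomically replaced with a forwarding
// pointer to the object itself, and the object is recorded in
// surviving_new_large_objects_ for later processing.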
bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
                                  ObjectFields object_fields) {
  // TODO(hpayer): Make this check size based, i.e.
  // object_size > kMaxRegularHeapObjectSize
  if (V8_UNLIKELY(
          FLAG_young_generation_large_objects &&
          MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
    DCHECK_EQ(NEW_LO_SPACE,
              MemoryChunk::FromHeapObject(object)->owner()->identity());
    if (object->map_slot().Release_CompareAndSwap(
            map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
      surviving_new_large_objects_.insert({object, map});
      promoted_size_ += object_size;
      if (object_fields == ObjectFields::kMaybePointers) {
        promotion_list_.PushLargeObject(object, map, object_size);
      }
    }
    return true;
  }
  return false;
}

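// Default evacuation path: large objects are promoted in place; everything
// else is first copied within the semispaces and promoted to the old
// generation only if it already survived a previous scavenge or if the
// semi-space copy fails.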
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateObjectDefault(
    Map map, THeapObjectSlot slot, HeapObject object, int object_size,
    ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  SLOW_DCHECK(object->SizeFromMap(map) == object_size);
  CopyAndForwardResult result;

  if (HandleLargeObject(map, object, object_size, object_fields)) {
    return KEEP_SLOT;
  }

  SLOW_DCHECK(static_cast<size_t>(object_size) <=
              MemoryChunkLayout::AllocatableMemoryInDataPage());

  if (!heap()->ShouldBePromoted(object->address())) {
    // A semi-space copy may fail due to fragmentation. In that case, we
    // try to promote the object.
    result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
    if (result != CopyAndForwardResult::FAILURE) {
      return RememberedSetEntryNeeded(result);
    }
  }

  // We may want to promote this object if the object was already semi-space
  // copied in a previous young generation GC or if the semi-space copy above
  // failed.
  result = PromoteObject(map, slot, object, object_size, object_fields);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  // If promotion failed, we try to copy the object to the other semi-space.
  result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  heap()->FatalProcessOutOfMemory("Scavenger: semi-space copy");
  UNREACHABLE();
}

template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
                                                 ThinString object,
                                                 int object_size) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  if (!is_incremental_marking_) {
    // The ThinString should die after Scavenge, so avoid writing the proper
    // forwarding pointer and instead just signal the actual object as the
    // forwarded reference.
    String actual = object->actual();
    // ThinStrings always refer to internalized strings, which are always in old
    // space.
    DCHECK(!Heap::InYoungGeneration(actual));
    HeapObjectReference::Update(slot, actual);
    return REMOVE_SLOT;
  }

  DCHECK_EQ(ObjectFields::kMaybePointers,
            Map::ObjectFieldsFrom(map->visitor_id()));
  return EvacuateObjectDefault(map, slot, object, object_size,
                               ObjectFields::kMaybePointers);
}

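// A cons string whose second part is the empty string can be shortcut: the
// slot is redirected to the first part and the cons wrapper is dropped.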
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
                                                        THeapObjectSlot slot,
                                                        ConsString object,
                                                        int object_size) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(IsShortcutCandidate(map->instance_type()));
  if (!is_incremental_marking_ &&
      object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
    HeapObject first = HeapObject::cast(object->unchecked_first());

    HeapObjectReference::Update(slot, first);

    if (!Heap::InYoungGeneration(first)) {
      object->map_slot().Release_Store(
          MapWord::FromForwardingAddress(first).ToMap());
      return REMOVE_SLOT;
    }

    MapWord first_word = first->synchronized_map_word();
    if (first_word.IsForwardingAddress()) {
      HeapObject target = first_word.ToForwardingAddress();

      HeapObjectReference::Update(slot, target);
      object->map_slot().Release_Store(
          MapWord::FromForwardingAddress(target).ToMap());
      return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
    }
    Map map = first_word.ToMap();
    SlotCallbackResult result =
        EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map),
                              Map::ObjectFieldsFrom(map->visitor_id()));
    object->map_slot().Release_Store(
        MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
    return result;
  }
  DCHECK_EQ(ObjectFields::kMaybePointers,
            Map::ObjectFieldsFrom(map->visitor_id()));
  return EvacuateObjectDefault(map, slot, object, object_size,
                               ObjectFields::kMaybePointers);
}

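// Dispatches on the visitor id: thin strings and shortcut candidates take
// the string-specific fast paths above; everything else takes the default
// evacuation path.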
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
                                             HeapObject source) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  SLOW_DCHECK(Heap::InFromPage(source));
  SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
  int size = source->SizeFromMap(map);
  // Cannot use ::cast() below because that would add checks in debug mode
  // that require re-reading the map.
  VisitorId visitor_id = map->visitor_id();
  switch (visitor_id) {
    case kVisitThinString:
      // At the moment we don't allow weak pointers to thin strings.
      DCHECK(!(*slot)->IsWeak());
      return EvacuateThinString(map, slot, ThinString::unchecked_cast(source),
                                size);
    case kVisitShortcutCandidate:
      DCHECK(!(*slot)->IsWeak());
      // At the moment we don't allow weak pointers to cons strings.
      return EvacuateShortcutCandidate(
          map, slot, ConsString::unchecked_cast(source), size);
    default:
      return EvacuateObjectDefault(map, slot, source, size,
                                   Map::ObjectFieldsFrom(visitor_id));
  }
}

template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
                                             HeapObject object) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(Heap::InFromPage(object));

  // Synchronized load that consumes the publishing CAS of MigrateObject.
  MapWord first_word = object->synchronized_map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    HeapObject dest = first_word.ToForwardingAddress();
    HeapObjectReference::Update(p, dest);
    DCHECK_IMPLIES(Heap::InYoungGeneration(dest),
                   Heap::InToPage(dest) || Heap::IsLargeObject(dest));

    return Heap::InYoungGeneration(dest) ? KEEP_SLOT : REMOVE_SLOT;
  }

  Map map = first_word.ToMap();
  // AllocationMementos are unrooted and shouldn't survive a scavenge.
  DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
  // Call the slow part of scavenge object.
  return EvacuateObject(p, map, object);
}

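// Processes a single remembered-set slot: the pointee is scavenged if it
// still lives in a from-page, and the return value says whether the slot has
// to remain in the remembered set.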
template <typename TSlot>
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap, TSlot slot) {
  static_assert(
      std::is_same<TSlot, FullMaybeObjectSlot>::value ||
          std::is_same<TSlot, MaybeObjectSlot>::value,
      "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
  using THeapObjectSlot = typename TSlot::THeapObjectSlot;
  MaybeObject object = *slot;
  if (Heap::InFromPage(object)) {
    HeapObject heap_object = object->GetHeapObject();

    SlotCallbackResult result =
        ScavengeObject(THeapObjectSlot(slot), heap_object);
    DCHECK_IMPLIES(result == REMOVE_SLOT,
                   !heap->InYoungGeneration((*slot)->GetHeapObject()));
    return result;
  } else if (Heap::InToPage(object)) {
    // Already updated slot. This can happen when processing of the work list
    // is interleaved with processing roots.
    return KEEP_SLOT;
  }
  // Slots can point to "to" space if the slot has been recorded multiple
  // times in the remembered set. We remove the redundant slot now.
  return REMOVE_SLOT;
}

void ScavengeVisitor::VisitPointers(HeapObject host, ObjectSlot start,
                                    ObjectSlot end) {
  return VisitPointersImpl(host, start, end);
}

void ScavengeVisitor::VisitPointers(HeapObject host, MaybeObjectSlot start,
                                    MaybeObjectSlot end) {
  return VisitPointersImpl(host, start, end);
}

void ScavengeVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
  Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
#ifdef DEBUG
  Code old_target = target;
#endif
  FullObjectSlot slot(&target);
  VisitHeapObjectImpl(slot, target);
  // Code objects are never in new-space, so the slot contents must not change.
  DCHECK_EQ(old_target, target);
}

void ScavengeVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
  HeapObject heap_object = rinfo->target_object();
#ifdef DEBUG
  HeapObject old_heap_object = heap_object;
#endif
  FullObjectSlot slot(&heap_object);
  VisitHeapObjectImpl(slot, heap_object);
  // We don't embed new-space objects into code, so the slot contents must not
  // change.
  DCHECK_EQ(old_heap_object, heap_object);
}

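// Only pointers into the young generation are interesting to the scavenger;
// everything else is skipped without updating the slot.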
template <typename TSlot>
void ScavengeVisitor::VisitHeapObjectImpl(TSlot slot, HeapObject heap_object) {
  if (Heap::InYoungGeneration(heap_object)) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    scavenger_->ScavengeObject(THeapObjectSlot(slot), heap_object);
  }
}

template <typename TSlot>
void ScavengeVisitor::VisitPointersImpl(HeapObject host, TSlot start,
                                        TSlot end) {
  for (TSlot slot = start; slot < end; ++slot) {
    typename TSlot::TObject object = *slot;
    HeapObject heap_object;
    // Treat weak references as strong.
    if (object.GetHeapObject(&heap_object)) {
      VisitHeapObjectImpl(slot, heap_object);
    }
  }
}

int ScavengeVisitor::VisitEphemeronHashTable(Map map,
                                             EphemeronHashTable table) {
  // Register the table with the scavenger, so it can take care of the weak
  // keys later. This allows us to only iterate the table's values, which are
  // treated as strong independently of whether the key is live.
  scavenger_->AddEphemeronHashTable(table);
  for (int i = 0; i < table->Capacity(); i++) {
    ObjectSlot value_slot =
        table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
    VisitPointer(table, value_slot);
  }

  return table->SizeFromMap(map);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SCAVENGER_INL_H_