// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_

#include "src/heap/scavenger.h"

#include "src/heap/incremental-marking-inl.h"
#include "src/heap/local-allocator-inl.h"
#include "src/objects-inl.h"
#include "src/objects/map.h"
#include "src/objects/slots-inl.h"

namespace v8 {
namespace internal {

void Scavenger::PromotionList::View::PushRegularObject(HeapObject object,
                                                       int size) {
  promotion_list_->PushRegularObject(task_id_, object, size);
}

void Scavenger::PromotionList::View::PushLargeObject(HeapObject object, Map map,
                                                     int size) {
  promotion_list_->PushLargeObject(task_id_, object, map, size);
}

bool Scavenger::PromotionList::View::IsEmpty() {
  return promotion_list_->IsEmpty();
}

size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
  return promotion_list_->LocalPushSegmentSize(task_id_);
}

bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
  return promotion_list_->Pop(task_id_, entry);
}

bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
  return promotion_list_->IsGlobalPoolEmpty();
}

bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
  return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
}

void Scavenger::PromotionList::PushRegularObject(int task_id, HeapObject object,
                                                 int size) {
  regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
}

void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject object,
                                               Map map, int size) {
  large_object_promotion_list_.Push(task_id, {object, map, size});
}

bool Scavenger::PromotionList::IsEmpty() {
  return regular_object_promotion_list_.IsEmpty() &&
         large_object_promotion_list_.IsEmpty();
}

size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
  return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
         large_object_promotion_list_.LocalPushSegmentSize(task_id);
}

bool Scavenger::PromotionList::Pop(int task_id,
                                   struct PromotionListEntry* entry) {
  ObjectAndSize regular_object;
  if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
    entry->heap_object = regular_object.first;
    entry->size = regular_object.second;
    entry->map = entry->heap_object->map();
    return true;
  }
  return large_object_promotion_list_.Pop(task_id, entry);
}

bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
  return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
         large_object_promotion_list_.IsGlobalPoolEmpty();
}

bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
  // Threshold for prioritizing processing of the promotion list. Right now
  // we only look at the regular object list.
  const int kProcessPromotionListThreshold =
      kRegularObjectPromotionListSegmentSize / 2;
  return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
}

void Scavenger::PageMemoryFence(MaybeObject object) {
#ifdef THREAD_SANITIZER
  // Perform a dummy acquire load to tell TSAN that there is no data race
  // with page initialization.
  HeapObject heap_object;
  if (object->GetHeapObject(&heap_object)) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
  }
#endif
}

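// Copies the object payload and then publishes the forwarding address with a
// release CAS on the source object's map slot. If the CAS observes a map that
// differs from |map|, another task won the race and has already migrated the
// object, so the caller must discard its copy.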
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
                              int size) {
  // Copy the content of source to target.
  target->set_map_word(MapWord::FromMap(map));
  heap()->CopyBlock(target->address() + kTaggedSize,
                    source->address() + kTaggedSize, size - kTaggedSize);

  Object old = source->map_slot().Release_CompareAndSwap(
      map, MapWord::FromForwardingAddress(target).ToMap());
  if (old != map) {
    // Another task migrated the object.
    return false;
  }

  if (V8_UNLIKELY(is_logging_)) {
    heap()->OnMoveEvent(target, source, size);
  }

  if (is_incremental_marking_) {
    heap()->incremental_marking()->TransferColor(source, target);
  }
  heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
  return true;
}

template <typename THeapObjectSlot>
CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
    Map map, THeapObjectSlot slot, HeapObject object, int object_size,
    ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation =
      allocator_.Allocate(NEW_SPACE, object_size, alignment);

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(NEW_SPACE, target, object_size);
      MapWord map_word = object->synchronized_map_word();
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromPage(*slot));
      return Heap::InToPage(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);
    if (object_fields == ObjectFields::kMaybePointers) {
      copied_list_.Push(ObjectAndSize(target, object_size));
    }
    copied_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}

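// Promotes an object by copying it into old space. Mirrors SemiSpaceCopyObject:
// on a lost migration race the just-allocated copy is handed back to the
// allocator and the slot is updated with the winning task's forwarding
// address instead.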
template <typename THeapObjectSlot>
CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
                                              HeapObject object,
                                              int object_size,
                                              ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation =
      allocator_.Allocate(OLD_SPACE, object_size, alignment);

  HeapObject target;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(OLD_SPACE, target, object_size);
      MapWord map_word = object->synchronized_map_word();
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromPage(*slot));
      return Heap::InToPage(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);
    if (object_fields == ObjectFields::kMaybePointers) {
      promotion_list_.PushRegularObject(target, object_size);
    }
    promoted_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}

SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
    CopyAndForwardResult result) {
  DCHECK_NE(CopyAndForwardResult::FAILURE, result);
  return result == CopyAndForwardResult::SUCCESS_YOUNG_GENERATION ? KEEP_SLOT
                                                                  : REMOVE_SLOT;
}

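// Young-generation large objects are never copied. Instead, the map slot is
// CAS'ed to a self-forwarding address, the object is recorded in
// surviving_new_large_objects_, and, if it may contain pointers, it is pushed
// onto the large object promotion list.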
bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
                                  ObjectFields object_fields) {
  // TODO(hpayer): Make this check size based, i.e.
  // object_size > kMaxRegularHeapObjectSize
  if (V8_UNLIKELY(
          FLAG_young_generation_large_objects &&
          MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
    DCHECK_EQ(NEW_LO_SPACE,
              MemoryChunk::FromHeapObject(object)->owner()->identity());
    if (object->map_slot().Release_CompareAndSwap(
            map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
      surviving_new_large_objects_.insert({object, map});
      promoted_size_ += object_size;
      if (object_fields == ObjectFields::kMaybePointers) {
        promotion_list_.PushLargeObject(object, map, object_size);
      }
    }
    return true;
  }
  return false;
}

template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateObjectDefault(
    Map map, THeapObjectSlot slot, HeapObject object, int object_size,
    ObjectFields object_fields) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  SLOW_DCHECK(object->SizeFromMap(map) == object_size);
  CopyAndForwardResult result;

  if (HandleLargeObject(map, object, object_size, object_fields)) {
    return KEEP_SLOT;
  }

  SLOW_DCHECK(static_cast<size_t>(object_size) <=
              MemoryChunkLayout::AllocatableMemoryInDataPage());

  if (!heap()->ShouldBePromoted(object->address())) {
    // A semi-space copy may fail due to fragmentation. In that case, we
    // try to promote the object.
    result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
    if (result != CopyAndForwardResult::FAILURE) {
      return RememberedSetEntryNeeded(result);
    }
  }

  // We may want to promote this object if the object was already semi-space
  // copied in a previous young generation GC or if the semi-space copy above
  // failed.
  result = PromoteObject(map, slot, object, object_size, object_fields);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  // If promotion failed, we try to copy the object to the other semi-space.
  result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  heap()->FatalProcessOutOfMemory("Scavenger: semi-space copy");
  UNREACHABLE();
}

template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
                                                 ThinString object,
                                                 int object_size) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  if (!is_incremental_marking_) {
    // The ThinString should die after Scavenge, so avoid writing the proper
    // forwarding pointer and instead just signal the actual object as the
    // forwarded reference.
    String actual = object->actual();
    // ThinStrings always refer to internalized strings, which are always in
    // old space.
    DCHECK(!Heap::InYoungGeneration(actual));
    HeapObjectReference::Update(slot, actual);
    return REMOVE_SLOT;
  }

  DCHECK_EQ(ObjectFields::kMaybePointers,
            Map::ObjectFieldsFrom(map->visitor_id()));
  return EvacuateObjectDefault(map, slot, object, object_size,
                               ObjectFields::kMaybePointers);
}

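// A ConsString whose second part is the empty string can be shortcut: the slot
// is updated to point directly at the first part, so the cons wrapper itself
// does not have to be evacuated. This shortcut is only taken while incremental
// marking is off.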
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
                                                        THeapObjectSlot slot,
                                                        ConsString object,
                                                        int object_size) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(IsShortcutCandidate(map->instance_type()));
  if (!is_incremental_marking_ &&
      object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
    HeapObject first = HeapObject::cast(object->unchecked_first());

    HeapObjectReference::Update(slot, first);

    if (!Heap::InYoungGeneration(first)) {
      object->map_slot().Release_Store(
          MapWord::FromForwardingAddress(first).ToMap());
      return REMOVE_SLOT;
    }

    MapWord first_word = first->synchronized_map_word();
    if (first_word.IsForwardingAddress()) {
      HeapObject target = first_word.ToForwardingAddress();

      HeapObjectReference::Update(slot, target);
      object->map_slot().Release_Store(
          MapWord::FromForwardingAddress(target).ToMap());
      return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
    }
    Map map = first_word.ToMap();
    SlotCallbackResult result =
        EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map),
                              Map::ObjectFieldsFrom(map->visitor_id()));
    object->map_slot().Release_Store(
        MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
    return result;
  }
  DCHECK_EQ(ObjectFields::kMaybePointers,
            Map::ObjectFieldsFrom(map->visitor_id()));
  return EvacuateObjectDefault(map, slot, object, object_size,
                               ObjectFields::kMaybePointers);
}

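// Dispatches on the map's visitor id: thin strings and shortcut candidates
// (cons strings with an empty second part) get specialized treatment; all
// other objects take the default evacuation path.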
template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
                                             HeapObject source) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  SLOW_DCHECK(Heap::InFromPage(source));
  SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
  int size = source->SizeFromMap(map);
  // Cannot use ::cast() below because that would add checks in debug mode
  // that require re-reading the map.
  VisitorId visitor_id = map->visitor_id();
  switch (visitor_id) {
    case kVisitThinString:
      // At the moment we don't allow weak pointers to thin strings.
      DCHECK(!(*slot)->IsWeak());
      return EvacuateThinString(map, slot, ThinString::unchecked_cast(source),
                                size);
    case kVisitShortcutCandidate:
      DCHECK(!(*slot)->IsWeak());
      // At the moment we don't allow weak pointers to cons strings.
      return EvacuateShortcutCandidate(
          map, slot, ConsString::unchecked_cast(source), size);
    default:
      return EvacuateObjectDefault(map, slot, source, size,
                                   Map::ObjectFieldsFrom(visitor_id));
  }
}

template <typename THeapObjectSlot>
SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
                                             HeapObject object) {
  static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                    std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
  DCHECK(Heap::InFromPage(object));

  // Synchronized load that consumes the publishing CAS of MigrateObject.
  MapWord first_word = object->synchronized_map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    HeapObject dest = first_word.ToForwardingAddress();
    HeapObjectReference::Update(p, dest);
    DCHECK_IMPLIES(Heap::InYoungGeneration(dest),
                   Heap::InToPage(dest) || Heap::IsLargeObject(dest));

    return Heap::InYoungGeneration(dest) ? KEEP_SLOT : REMOVE_SLOT;
  }

  Map map = first_word.ToMap();
  // AllocationMementos are unrooted and shouldn't survive a scavenge.
  DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
  // Call the slow part of scavenge object.
  return EvacuateObject(p, map, object);
}

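// Used when iterating remembered set slots: scavenges the slot's target if it
// is still in from-space and reports whether the slot has to stay in the
// remembered set (KEEP_SLOT) or can be dropped (REMOVE_SLOT).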
template <typename TSlot>
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap, TSlot slot) {
  static_assert(
      std::is_same<TSlot, FullMaybeObjectSlot>::value ||
          std::is_same<TSlot, MaybeObjectSlot>::value,
      "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
  using THeapObjectSlot = typename TSlot::THeapObjectSlot;
  MaybeObject object = *slot;
  if (Heap::InFromPage(object)) {
    HeapObject heap_object = object->GetHeapObject();

    SlotCallbackResult result =
        ScavengeObject(THeapObjectSlot(slot), heap_object);
    DCHECK_IMPLIES(result == REMOVE_SLOT,
                   !heap->InYoungGeneration((*slot)->GetHeapObject()));
    return result;
  } else if (Heap::InToPage(object)) {
    // Already updated slot. This can happen when processing of the work list
    // is interleaved with processing roots.
    return KEEP_SLOT;
  }
  // Slots can point to "to" space if the slot has been recorded multiple
  // times in the remembered set. We remove the redundant slot now.
  return REMOVE_SLOT;
}

void ScavengeVisitor::VisitPointers(HeapObject host, ObjectSlot start,
                                    ObjectSlot end) {
  return VisitPointersImpl(host, start, end);
}

void ScavengeVisitor::VisitPointers(HeapObject host, MaybeObjectSlot start,
                                    MaybeObjectSlot end) {
  return VisitPointersImpl(host, start, end);
}

void ScavengeVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
  Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
#ifdef DEBUG
  Code old_target = target;
#endif
  FullObjectSlot slot(&target);
  VisitHeapObjectImpl(slot, target);
  // Code objects are never in new-space, so the slot contents must not change.
  DCHECK_EQ(old_target, target);
}

void ScavengeVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
  HeapObject heap_object = rinfo->target_object();
#ifdef DEBUG
  HeapObject old_heap_object = heap_object;
#endif
  FullObjectSlot slot(&heap_object);
  VisitHeapObjectImpl(slot, heap_object);
  // We don't embed new-space objects into code, so the slot contents must not
  // change.
  DCHECK_EQ(old_heap_object, heap_object);
}

template <typename TSlot>
void ScavengeVisitor::VisitHeapObjectImpl(TSlot slot, HeapObject heap_object) {
  if (Heap::InYoungGeneration(heap_object)) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    scavenger_->ScavengeObject(THeapObjectSlot(slot), heap_object);
  }
}

template <typename TSlot>
void ScavengeVisitor::VisitPointersImpl(HeapObject host, TSlot start,
                                        TSlot end) {
  for (TSlot slot = start; slot < end; ++slot) {
    typename TSlot::TObject object = *slot;
    HeapObject heap_object;
    // Treat weak references as strong.
    if (object.GetHeapObject(&heap_object)) {
      VisitHeapObjectImpl(slot, heap_object);
    }
  }
}

int ScavengeVisitor::VisitEphemeronHashTable(Map map,
                                             EphemeronHashTable table) {
  // Register the table with the scavenger, so it can take care of the weak
  // keys later. This allows us to iterate only the tables' values, which are
  // treated as strong independently of whether the key is live.
  scavenger_->AddEphemeronHashTable(table);
  for (int i = 0; i < table->Capacity(); i++) {
    ObjectSlot value_slot =
        table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
    VisitPointer(table, value_slot);
  }

  return table->SizeFromMap(map);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SCAVENGER_INL_H_