Line data Source code
1 : // Copyright 2011 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_SPACES_INL_H_
6 : #define V8_HEAP_SPACES_INL_H_
7 :
8 : #include "src/heap/incremental-marking.h"
9 : #include "src/heap/spaces.h"
10 : #include "src/isolate.h"
11 : #include "src/msan.h"
12 : #include "src/profiler/heap-profiler.h"
13 : #include "src/v8memory.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 :
18 : template <class PAGE_TYPE>
19 7671949 : PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
20 15343898 : p_ = p_->next_page();
21 7671949 : return *this;
22 : }
23 :
24 : template <class PAGE_TYPE>
25 : PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
26 : PageIteratorImpl<PAGE_TYPE> tmp(*this);
27 3798206 : operator++();
28 : return tmp;
29 : }
30 :
31 311106 : PageRange::PageRange(Address start, Address limit)
32 : : begin_(Page::FromAddress(start)),
33 622212 : end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
34 : #ifdef DEBUG
35 : if (begin_->InNewSpace()) {
36 : SemiSpace::AssertValidRange(start, limit);
37 : }
38 : #endif // DEBUG
39 311106 : }
40 :
41 : // -----------------------------------------------------------------------------
42 : // SemiSpaceIterator
43 :
44 15541846 : HeapObject* SemiSpaceIterator::Next() {
45 32795154 : while (current_ != limit_) {
46 17228779 : if (Page::IsAlignedToPageSize(current_)) {
47 : Page* page = Page::FromAllocationAreaAddress(current_);
48 : page = page->next_page();
49 : DCHECK(!page->is_anchor());
50 811 : current_ = page->area_start();
51 811 : if (current_ == limit_) return nullptr;
52 : }
53 17228779 : HeapObject* object = HeapObject::FromAddress(current_);
54 17228779 : current_ += object->Size();
55 17228779 : if (!object->IsFiller()) {
56 : return object;
57 : }
58 : }
59 : return nullptr;
60 : }
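Usage sketch (editorial addition, not part of the listed file): SemiSpaceIterator is driven by calling Next() until it returns nullptr, and fillers are skipped inside Next() itself, so a caller only ever sees real objects. The sketch assumes the SemiSpaceIterator(NewSpace*) constructor declared in spaces.h, a Heap* named heap, and a hypothetical VisitObject helper.

  SemiSpaceIterator it(heap->new_space());
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    VisitObject(obj);  // hypothetical visitor; fillers never reach this point
  }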
61 :
62 : // -----------------------------------------------------------------------------
63 : // HeapObjectIterator
64 :
65 354147290 : HeapObject* HeapObjectIterator::Next() {
66 301400 : do {
67 354375103 : HeapObject* next_obj = FromCurrentPage();
68 354375103 : if (next_obj != nullptr) return next_obj;
69 : } while (AdvanceToNextPage());
70 : return nullptr;
71 : }
72 :
73 354375103 : HeapObject* HeapObjectIterator::FromCurrentPage() {
74 729100674 : while (cur_addr_ != cur_end_) {
75 748859930 : if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
76 11403 : cur_addr_ = space_->limit();
77 11403 : continue;
78 : }
79 374412768 : HeapObject* obj = HeapObject::FromAddress(cur_addr_);
80 374412768 : const int obj_size = obj->Size();
81 374412768 : cur_addr_ += obj_size;
82 : DCHECK_LE(cur_addr_, cur_end_);
83 374412768 : if (!obj->IsFiller()) {
84 : if (obj->IsCode()) {
85 : DCHECK_EQ(space_, space_->heap()->code_space());
86 : DCHECK_CODEOBJECT_SIZE(obj_size, space_);
87 : } else {
88 : DCHECK_OBJECT_SIZE(obj_size);
89 : }
90 354073703 : return obj;
91 : }
92 : }
93 : return nullptr;
94 : }
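Usage sketch (editorial addition, not part of the listed file): HeapObjectIterator walks a paged space page by page; FromCurrentPage() above additionally jumps over the currently open allocation area (top..limit) and skips fillers. The sketch assumes the HeapObjectIterator(PagedSpace*) constructor declared in spaces.h and a Heap* named heap.

  HeapObjectIterator it(heap->old_space());
  size_t object_bytes = 0;  // bytes occupied by non-filler objects
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    object_bytes += obj->Size();
  }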
95 :
96 : // -----------------------------------------------------------------------------
97 : // MemoryAllocator
98 :
99 : #ifdef ENABLE_HEAP_PROTECTION
100 :
101 : void MemoryAllocator::Protect(Address start, size_t size) {
102 : base::OS::Protect(start, size);
103 : }
104 :
105 :
106 : void MemoryAllocator::Unprotect(Address start, size_t size,
107 : Executability executable) {
108 : base::OS::Unprotect(start, size, executable);
109 : }
110 :
111 :
112 : void MemoryAllocator::ProtectChunkFromPage(Page* page) {
113 : int id = GetChunkId(page);
114 : base::OS::Protect(chunks_[id].address(), chunks_[id].size());
115 : }
116 :
117 :
118 : void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
119 : int id = GetChunkId(page);
120 : base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
121 : chunks_[id].owner()->executable() == EXECUTABLE);
122 : }
123 :
124 : #endif
125 :
126 : // -----------------------------------------------------------------------------
127 : // SemiSpace
128 :
129 : bool SemiSpace::Contains(HeapObject* o) {
130 : return id_ == kToSpace
131 3128681 : ? MemoryChunk::FromAddress(o->address())->InToSpace()
132 3128681 : : MemoryChunk::FromAddress(o->address())->InFromSpace();
133 : }
134 :
135 6257362 : bool SemiSpace::Contains(Object* o) {
136 6257362 : return o->IsHeapObject() && Contains(HeapObject::cast(o));
137 : }
138 :
139 0 : bool SemiSpace::ContainsSlow(Address a) {
140 0 : for (Page* p : *this) {
141 0 : if (p == MemoryChunk::FromAddress(a)) return true;
142 : }
143 0 : return false;
144 : }
145 :
146 : // --------------------------------------------------------------------------
147 : // NewSpace
148 :
149 : bool NewSpace::Contains(HeapObject* o) {
150 24 : return MemoryChunk::FromAddress(o->address())->InNewSpace();
151 : }
152 :
153 : bool NewSpace::Contains(Object* o) {
154 : return o->IsHeapObject() && Contains(HeapObject::cast(o));
155 : }
156 :
157 : bool NewSpace::ContainsSlow(Address a) {
158 : return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
159 : }
160 :
161 : bool NewSpace::ToSpaceContainsSlow(Address a) {
162 0 : return to_space_.ContainsSlow(a);
163 : }
164 :
165 : bool NewSpace::FromSpaceContainsSlow(Address a) {
166 : return from_space_.ContainsSlow(a);
167 : }
168 :
169 3128681 : bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
170 : bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
171 :
172 377598 : Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
173 188799 : SemiSpace* owner) {
174 : DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
175 : bool in_to_space = (owner->id() != kFromSpace);
176 : chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
177 188799 : : MemoryChunk::IN_FROM_SPACE);
178 : DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
179 : : MemoryChunk::IN_TO_SPACE));
180 : Page* page = static_cast<Page*>(chunk);
181 188799 : heap->incremental_marking()->SetNewSpacePageFlags(page);
182 188799 : page->AllocateLocalTracker();
183 188799 : if (FLAG_minor_mc) {
184 : page->AllocateYoungGenerationBitmap();
185 : MarkingState::External(page).ClearLiveness();
186 : }
187 188799 : return page;
188 : }
189 :
190 : // --------------------------------------------------------------------------
191 : // PagedSpace
192 :
193 : template <Page::InitializationMode mode>
194 1011400 : Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
195 : PagedSpace* owner) {
196 : Page* page = reinterpret_cast<Page*>(chunk);
197 : DCHECK(page->area_size() <= kAllocatableMemory);
198 : DCHECK(chunk->owner() == owner);
199 :
200 666 : owner->IncreaseCapacity(page->area_size());
201 505700 : heap->incremental_marking()->SetOldSpacePageFlags(chunk);
202 :
203 : // Make sure that categories are initialized before freeing the area.
204 : page->InitializeFreeListCategories();
205 : // If we do not free the memory here, we effectively account for the whole
206 : // page as allocated memory that cannot be used for further allocations.
207 : if (mode == kFreeMemory) {
208 : owner->Free(page->area_start(), page->area_size());
209 : }
210 :
211 505700 : return page;
212 : }
213 :
214 666 : Page* Page::ConvertNewToOld(Page* old_page) {
215 : DCHECK(!old_page->is_anchor());
216 : DCHECK(old_page->InNewSpace());
217 1998 : OldSpace* old_space = old_page->heap()->old_space();
218 : old_page->set_owner(old_space);
219 : old_page->SetFlags(0, static_cast<uintptr_t>(~0));
220 : old_space->AccountCommitted(old_page->size());
221 : Page* new_page = Page::Initialize<kDoNotFreeMemory>(
222 666 : old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
223 666 : new_page->InsertAfter(old_space->anchor()->prev_page());
224 666 : return new_page;
225 : }
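Editorial note (not in the original file): ConvertNewToOld promotes a full new-space page in place. It clears every flag with SetFlags(0, ~0), hands ownership to the old space, accounts the page's committed memory there, and re-runs Page::Initialize with kDoNotFreeMemory so the objects already on the page stay accounted as allocated instead of being returned to the free list.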
226 :
227 : void Page::InitializeFreeListCategories() {
228 3034650 : for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
229 3034650 : categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
230 : }
231 : }
232 :
233 5647697 : bool PagedSpace::Contains(Address addr) {
234 5647697 : return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
235 : }
236 :
237 3129821 : bool PagedSpace::Contains(Object* o) {
238 3129821 : if (!o->IsHeapObject()) return false;
239 3129821 : Page* p = Page::FromAddress(HeapObject::cast(o)->address());
240 3129821 : if (!Page::IsValid(p)) return false;
241 3129821 : return p->owner() == this;
242 : }
243 :
244 : void PagedSpace::UnlinkFreeListCategories(Page* page) {
245 : DCHECK_EQ(this, page->owner());
246 213672 : page->ForAllFreeListCategories([this](FreeListCategory* category) {
247 : DCHECK_EQ(free_list(), category->owner());
248 213672 : free_list()->RemoveCategory(category);
249 213672 : });
250 : }
251 :
252 : intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
253 : DCHECK_EQ(this, page->owner());
254 515034 : intptr_t added = 0;
255 3090204 : page->ForAllFreeListCategories([&added](FreeListCategory* category) {
256 3090204 : added += category->available();
257 3090204 : category->Relink();
258 515034 : });
259 : DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
260 479422 : return added;
261 : }
262 :
263 342745661 : MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
264 : MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
265 289643592 : uintptr_t offset = addr - chunk->address();
266 579232781 : if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
267 53102069 : chunk = heap->lo_space()->FindPageThreadSafe(addr);
268 : }
269 289643592 : return chunk;
270 : }
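Editorial note (not in the original file): the HasPageHeader() test above catches addresses whose masked page does not carry a real chunk header, most notably interior pointers into large objects. A LargePage spans several OS pages and LargePage::Initialize below zeroes the owner word of every page after the first, so such an address is resolved through a thread-safe search of the large-object space instead.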
271 :
272 : Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
273 283995895 : return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
274 : }
275 :
276 : void Page::MarkNeverAllocateForTesting() {
277 : DCHECK(this->owner()->identity() != NEW_SPACE);
278 : DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
279 : SetFlag(NEVER_ALLOCATE_ON_PAGE);
280 : reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
281 : }
282 :
283 9695 : void Page::MarkEvacuationCandidate() {
284 : DCHECK(!IsFlagSet(NEVER_EVACUATE));
285 : DCHECK_NULL(slot_set<OLD_TO_OLD>());
286 : DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
287 : SetFlag(EVACUATION_CANDIDATE);
288 9695 : reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
289 9695 : }
290 :
291 : void Page::ClearEvacuationCandidate() {
292 : if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
293 : DCHECK_NULL(slot_set<OLD_TO_OLD>());
294 : DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
295 : }
296 : ClearFlag(EVACUATION_CANDIDATE);
297 : InitializeFreeListCategories();
298 : }
299 :
300 1220405 : MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
301 : : heap_(heap),
302 : state_(kOldSpaceState),
303 : old_iterator_(heap->old_space()->begin()),
304 : code_iterator_(heap->code_space()->begin()),
305 : map_iterator_(heap->map_space()->begin()),
306 1464486 : lo_iterator_(heap->lo_space()->begin()) {}
307 :
308 2625105 : MemoryChunk* MemoryChunkIterator::next() {
309 2625105 : switch (state_) {
310 : case kOldSpaceState: {
311 4232226 : if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
312 244081 : state_ = kMapState;
313 : // Fall through.
314 : }
315 : case kMapState: {
316 1506026 : if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
317 244081 : state_ = kCodeState;
318 : // Fall through.
319 : }
320 : case kCodeState: {
321 2378514 : if (code_iterator_ != heap_->code_space()->end())
322 945176 : return *(code_iterator_++);
323 244081 : state_ = kLargeObjectState;
324 : // Fall through.
325 : }
326 : case kLargeObjectState: {
327 270100 : if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
328 244081 : state_ = kFinishedState;
329 : // Fall through.
330 : }
331 : case kFinishedState:
332 : return nullptr;
333 : default:
334 : break;
335 : }
336 0 : UNREACHABLE();
337 : return nullptr;
338 : }
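Usage sketch (editorial addition, not part of the listed file): the iterator visits old-space, map-space, code-space, and then large-object chunks, and signals completion by returning nullptr. ProcessChunk is a hypothetical per-chunk callback.

  MemoryChunkIterator chunk_it(heap);
  while (MemoryChunk* chunk = chunk_it.next()) {
    ProcessChunk(chunk);
  }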
339 :
340 : Page* FreeListCategory::page() {
341 : return Page::FromAddress(reinterpret_cast<Address>(this));
342 : }
343 :
344 : FreeList* FreeListCategory::owner() {
345 : return reinterpret_cast<PagedSpace*>(
346 4232985 : Page::FromAddress(reinterpret_cast<Address>(this))->owner())
347 : ->free_list();
348 : }
349 :
350 0 : bool FreeListCategory::is_linked() {
351 0 : return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
352 : }
353 :
354 : // Try linear allocation in the page of alloc_info's allocation top. Does
355 : // not contain slow case logic (e.g. move to the next page or try free list
356 : // allocation) so it can be used by all the allocation functions and for all
357 : // the paged spaces.
358 : HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
359 699925772 : Address current_top = allocation_info_.top();
360 349962886 : Address new_top = current_top + size_in_bytes;
361 349962886 : if (new_top > allocation_info_.limit()) return NULL;
362 :
363 : allocation_info_.set_top(new_top);
364 347728510 : return HeapObject::FromAddress(current_top);
365 : }
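To make the fast path above concrete, here is a minimal, self-contained bump allocator (editorial illustration, not V8 code): allocation is one compare and one add against a top/limit pair, and anything that does not fit is pushed to a slow path, which is exactly the shape of AllocateLinearly.

#include <cstddef>
#include <cstdint>

struct BumpRegion {
  uintptr_t top;    // next free address in the linear allocation area
  uintptr_t limit;  // end of the linear allocation area

  // Returns the start address of the new object, or 0 if the request does
  // not fit and the caller must take a slow path (new page, free list, GC).
  uintptr_t AllocateLinearly(size_t size_in_bytes) {
    uintptr_t current_top = top;
    uintptr_t new_top = current_top + size_in_bytes;
    if (new_top > limit) return 0;
    top = new_top;
    return current_top;
  }
};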
366 :
367 :
368 15010048 : AllocationResult LocalAllocationBuffer::AllocateRawAligned(
369 : int size_in_bytes, AllocationAlignment alignment) {
370 30032032 : Address current_top = allocation_info_.top();
371 15010048 : int filler_size = Heap::GetFillToAlign(current_top, alignment);
372 :
373 15021984 : Address new_top = current_top + filler_size + size_in_bytes;
374 15021984 : if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
375 :
376 : allocation_info_.set_top(new_top);
377 14851056 : if (filler_size > 0) {
378 : return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
379 0 : filler_size);
380 : }
381 :
382 14851056 : return AllocationResult(HeapObject::FromAddress(current_top));
383 : }
384 :
385 :
386 : HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
387 : AllocationAlignment alignment) {
388 : Address current_top = allocation_info_.top();
389 : int filler_size = Heap::GetFillToAlign(current_top, alignment);
390 :
391 : Address new_top = current_top + filler_size + *size_in_bytes;
392 : if (new_top > allocation_info_.limit()) return NULL;
393 :
394 : allocation_info_.set_top(new_top);
395 : if (filler_size > 0) {
396 : *size_in_bytes += filler_size;
397 : return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
398 : filler_size);
399 : }
400 :
401 : return HeapObject::FromAddress(current_top);
402 : }
403 :
404 :
405 : // Raw allocation.
406 349962886 : AllocationResult PagedSpace::AllocateRawUnaligned(
407 : int size_in_bytes, UpdateSkipList update_skip_list) {
408 : HeapObject* object = AllocateLinearly(size_in_bytes);
409 :
410 349962886 : if (object == NULL) {
411 2235566 : object = free_list_.Allocate(size_in_bytes);
412 2235551 : if (object == NULL) {
413 554716 : object = SlowAllocateRaw(size_in_bytes);
414 : }
415 299872799 : if (object != NULL && heap()->incremental_marking()->black_allocation()) {
416 1035 : Address start = object->address();
417 1035 : Address end = object->address() + size_in_bytes;
418 1035 : Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
419 : }
420 : }
421 :
422 349963235 : if (object != NULL) {
423 647589387 : if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
424 2737029 : SkipList::Update(object->address(), size_in_bytes);
425 : }
426 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
427 349957869 : return object;
428 : }
429 :
430 : return AllocationResult::Retry(identity());
431 : }
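Editorial summary of the fallback chain above (not part of the original file); the exact behaviour of SlowAllocateRaw lives in spaces.cc, so the last step is described only loosely.

// AllocateLinearly(size)      -> bump-pointer fast path in the current page
// free_list_.Allocate(size)   -> search the space's free list
// SlowAllocateRaw(size)       -> slower measures (e.g. sweeping or growing the
//                                space); on failure the caller gets
//                                AllocationResult::Retry() and may trigger GC.
// With black allocation enabled, memory handed out by the slower paths is
// immediately marked black via CreateBlackArea.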
432 :
433 :
434 : AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
435 : int size_in_bytes) {
436 : base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
437 : return AllocateRawUnaligned(size_in_bytes);
438 : }
439 :
440 :
441 : // Raw allocation.
442 : AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
443 : AllocationAlignment alignment) {
444 : DCHECK(identity() == OLD_SPACE);
445 : int allocation_size = size_in_bytes;
446 : HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
447 :
448 : if (object == NULL) {
449 : // We don't know exactly how much filler we need to align until space is
450 : // allocated, so assume the worst case.
451 : int filler_size = Heap::GetMaximumFillToAlign(alignment);
452 : allocation_size += filler_size;
453 : object = free_list_.Allocate(allocation_size);
454 : if (object == NULL) {
455 : object = SlowAllocateRaw(allocation_size);
456 : }
457 : if (object != NULL) {
458 : if (heap()->incremental_marking()->black_allocation()) {
459 : Address start = object->address();
460 : Address end = object->address() + allocation_size;
461 : Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
462 : }
463 : if (filler_size != 0) {
464 : object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
465 : alignment);
466 : // Filler objects are initialized, so mark only the aligned object
467 : // memory as uninitialized.
468 : allocation_size = size_in_bytes;
469 : }
470 : }
471 : }
472 :
473 : if (object != NULL) {
474 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
475 : return object;
476 : }
477 :
478 : return AllocationResult::Retry(identity());
479 : }
480 :
481 :
482 263211159 : AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
483 : AllocationAlignment alignment) {
484 : #ifdef V8_HOST_ARCH_32_BIT
485 : AllocationResult result =
486 : alignment == kDoubleAligned
487 : ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
488 : : AllocateRawUnaligned(size_in_bytes);
489 : #else
490 263211159 : AllocationResult result = AllocateRawUnaligned(size_in_bytes);
491 : #endif
492 : HeapObject* heap_obj = nullptr;
493 526423406 : if (!result.IsRetry() && result.To(&heap_obj)) {
494 263211045 : AllocationStep(heap_obj->address(), size_in_bytes);
495 : }
496 263236058 : return result;
497 : }
498 :
499 :
500 : // -----------------------------------------------------------------------------
501 : // NewSpace
502 :
503 :
504 : AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
505 : AllocationAlignment alignment) {
506 : Address top = allocation_info_.top();
507 : int filler_size = Heap::GetFillToAlign(top, alignment);
508 : int aligned_size_in_bytes = size_in_bytes + filler_size;
509 :
510 : if (allocation_info_.limit() - top < aligned_size_in_bytes) {
511 : // See if we can create room.
512 : if (!EnsureAllocation(size_in_bytes, alignment)) {
513 : return AllocationResult::Retry();
514 : }
515 :
516 : top = allocation_info_.top();
517 : filler_size = Heap::GetFillToAlign(top, alignment);
518 : aligned_size_in_bytes = size_in_bytes + filler_size;
519 : }
520 :
521 : HeapObject* obj = HeapObject::FromAddress(top);
522 : allocation_info_.set_top(top + aligned_size_in_bytes);
523 : DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
524 :
525 : if (filler_size > 0) {
526 : obj = heap()->PrecedeWithFiller(obj, filler_size);
527 : }
528 :
529 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
530 :
531 : return obj;
532 : }
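A worked example for the aligned path (editorial, assuming a 32-bit host where kPointerSize is 4): with kDoubleAligned, an allocation top ending in ...4 is not 8-byte aligned, so Heap::GetFillToAlign returns one word (4 bytes) and the object is preceded by a one-word filler, while a top ending in ...0 or ...8 needs no filler and filler_size stays 0. On 64-bit hosts every word-aligned top is already double-aligned, which is why the AllocateRaw dispatch below only takes the aligned path under V8_HOST_ARCH_32_BIT.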
533 :
534 :
535 : AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
536 418470834 : Address top = allocation_info_.top();
537 418232035 : if (allocation_info_.limit() < top + size_in_bytes) {
538 : // See if we can create room.
539 306133 : if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
540 67334 : return AllocationResult::Retry();
541 : }
542 :
543 : top = allocation_info_.top();
544 : }
545 :
546 418164701 : HeapObject* obj = HeapObject::FromAddress(top);
547 418164726 : allocation_info_.set_top(top + size_in_bytes);
548 : DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
549 :
550 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
551 :
552 418164726 : return obj;
553 : }
554 :
555 :
556 : AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
557 : AllocationAlignment alignment) {
558 : #ifdef V8_HOST_ARCH_32_BIT
559 : return alignment == kDoubleAligned
560 : ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
561 : : AllocateRawUnaligned(size_in_bytes);
562 : #else
563 : return AllocateRawUnaligned(size_in_bytes);
564 : #endif
565 : }
566 :
567 :
568 483922 : MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
569 : int size_in_bytes, AllocationAlignment alignment) {
570 483922 : base::LockGuard<base::Mutex> guard(&mutex_);
571 483987 : return AllocateRaw(size_in_bytes, alignment);
572 : }
573 :
574 132224 : LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
575 : Executability executable, Space* owner) {
576 21464 : if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
577 : STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
578 0 : FATAL("Code page is too large.");
579 : }
580 20901 : heap->incremental_marking()->SetOldSpacePageFlags(chunk);
581 :
582 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
583 :
584 : // Initialize the owner field for each contained page (except the first, which
585 : // is initialized by MemoryChunk::Initialize).
586 179718 : for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
587 : addr < chunk->area_end(); addr += Page::kPageSize) {
588 : // Clear out kPageHeaderTag.
589 68958 : Memory::Address_at(addr) = 0;
590 : }
591 :
592 20901 : return static_cast<LargePage*>(chunk);
593 : }
594 :
595 141635 : size_t LargeObjectSpace::Available() {
596 283270 : return ObjectSizeFor(heap()->memory_allocator()->Available());
597 : }
598 :
599 :
600 : LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
601 59289 : return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
602 : }
603 :
604 :
605 205712 : LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
606 : AllocationResult result,
607 : intptr_t size) {
608 205712 : if (result.IsRetry()) return InvalidBuffer();
609 : HeapObject* obj = nullptr;
610 : bool ok = result.To(&obj);
611 : USE(ok);
612 : DCHECK(ok);
613 205712 : Address top = HeapObject::cast(obj)->address();
614 411424 : return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
615 : }
616 :
617 :
618 : bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
619 205713 : if (allocation_info_.top() == other->allocation_info_.limit()) {
620 : allocation_info_.set_top(other->allocation_info_.top());
621 : other->allocation_info_.Reset(nullptr, nullptr);
622 : return true;
623 : }
624 : return false;
625 : }
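Usage sketch for the LocalAllocationBuffer helpers above (editorial, not part of the listed file). kBufferSize and object_size are assumed values, and IsValid() is the buffer validity check declared in spaces.h; the remaining calls all appear in this file.

  const int kBufferSize = 4 * 1024;  // assumed size, not a V8 constant
  AllocationResult result =
      heap->new_space()->AllocateRawSynchronized(kBufferSize, kWordAligned);
  LocalAllocationBuffer lab =
      LocalAllocationBuffer::FromResult(heap, result, kBufferSize);
  if (lab.IsValid()) {
    AllocationResult object = lab.AllocateRawAligned(object_size, kWordAligned);
    // A later buffer that starts exactly where this one ends can reclaim the
    // unused tail via TryMerge.
  }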
626 :
627 : } // namespace internal
628 : } // namespace v8
629 :
630 : #endif // V8_HEAP_SPACES_INL_H_