Line  Hit count  Source code
1 : // Copyright 2011 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_SPACES_INL_H_
6 : #define V8_HEAP_SPACES_INL_H_
7 :
8 : #include "src/heap/incremental-marking.h"
9 : #include "src/heap/spaces.h"
10 : #include "src/isolate.h"
11 : #include "src/msan.h"
12 : #include "src/profiler/heap-profiler.h"
13 : #include "src/v8memory.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 :
18 : template <class PAGE_TYPE>
19 7680861 : PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
20 15361722 : p_ = p_->next_page();
21 7680861 : return *this;
22 : }
23 :
24 : template <class PAGE_TYPE>
25 : PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
26 : PageIteratorImpl<PAGE_TYPE> tmp(*this);
27 3802015 : operator++();
28 : return tmp;
29 : }
30 :
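// Constructs the range of pages covering the addresses from |start| up to and
// including the page that contains |limit|.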
31 312160 : PageRange::PageRange(Address start, Address limit)
32 : : begin_(Page::FromAddress(start)),
33 624320 : end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
34 : #ifdef DEBUG
35 : if (begin_->InNewSpace()) {
36 : SemiSpace::AssertValidRange(start, limit);
37 : }
38 : #endif // DEBUG
39 312160 : }
40 :
41 : // -----------------------------------------------------------------------------
42 : // SemiSpaceIterator
43 :
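// Returns the next live object in the semispace, advancing across page
// boundaries and skipping fillers; returns nullptr once |limit_| is reached.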
44 15544834 : HeapObject* SemiSpaceIterator::Next() {
45 32801141 : while (current_ != limit_) {
46 17231738 : if (Page::IsAlignedToPageSize(current_)) {
47 : Page* page = Page::FromAllocationAreaAddress(current_);
48 : page = page->next_page();
49 : DCHECK(!page->is_anchor());
50 811 : current_ = page->area_start();
51 811 : if (current_ == limit_) return nullptr;
52 : }
53 17231738 : HeapObject* object = HeapObject::FromAddress(current_);
54 17231738 : current_ += object->Size();
55 17231738 : if (!object->IsFiller()) {
56 : return object;
57 : }
58 : }
59 : return nullptr;
60 : }
61 :
62 : // -----------------------------------------------------------------------------
63 : // HeapObjectIterator
64 :
65 354711452 : HeapObject* HeapObjectIterator::Next() {
66 301975 : do {
67 354939720 : HeapObject* next_obj = FromCurrentPage();
68 354939720 : if (next_obj != nullptr) return next_obj;
69 : } while (AdvanceToNextPage());
70 : return nullptr;
71 : }
72 :
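// Scans the current page for the next non-filler object, skipping the unused
// gap between the space's top and limit; returns nullptr at the end of the
// page.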
73 354939720 : HeapObject* HeapObjectIterator::FromCurrentPage() {
74 730257145 : while (cur_addr_ != cur_end_) {
75 750042488 : if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
76 11421 : cur_addr_ = space_->limit();
77 11421 : continue;
78 : }
79 375004029 : HeapObject* obj = HeapObject::FromAddress(cur_addr_);
80 375004029 : const int obj_size = obj->Size();
81 375004029 : cur_addr_ += obj_size;
82 : DCHECK_LE(cur_addr_, cur_end_);
83 375004029 : if (!obj->IsFiller()) {
84 : if (obj->IsCode()) {
85 : DCHECK_EQ(space_, space_->heap()->code_space());
86 : DCHECK_CODEOBJECT_SIZE(obj_size, space_);
87 : } else {
88 : DCHECK_OBJECT_SIZE(obj_size);
89 : }
90 354637745 : return obj;
91 : }
92 : }
93 : return nullptr;
94 : }
95 :
96 : // -----------------------------------------------------------------------------
97 : // MemoryAllocator
98 :
99 : #ifdef ENABLE_HEAP_PROTECTION
100 :
101 : void MemoryAllocator::Protect(Address start, size_t size) {
102 : base::OS::Protect(start, size);
103 : }
104 :
105 :
106 : void MemoryAllocator::Unprotect(Address start, size_t size,
107 : Executability executable) {
108 : base::OS::Unprotect(start, size, executable);
109 : }
110 :
111 :
112 : void MemoryAllocator::ProtectChunkFromPage(Page* page) {
113 : int id = GetChunkId(page);
114 : base::OS::Protect(chunks_[id].address(), chunks_[id].size());
115 : }
116 :
117 :
118 : void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
119 : int id = GetChunkId(page);
120 : base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
121 : chunks_[id].owner()->executable() == EXECUTABLE);
122 : }
123 :
124 : #endif
125 :
126 : // -----------------------------------------------------------------------------
127 : // SemiSpace
128 :
129 : bool SemiSpace::Contains(HeapObject* o) {
130 : return id_ == kToSpace
131 3128716 : ? MemoryChunk::FromAddress(o->address())->InToSpace()
132 3128716 : : MemoryChunk::FromAddress(o->address())->InFromSpace();
133 : }
134 :
135 6257432 : bool SemiSpace::Contains(Object* o) {
136 6257432 : return o->IsHeapObject() && Contains(HeapObject::cast(o));
137 : }
138 :
139 0 : bool SemiSpace::ContainsSlow(Address a) {
140 0 : for (Page* p : *this) {
141 0 : if (p == MemoryChunk::FromAddress(a)) return true;
142 : }
143 0 : return false;
144 : }
145 :
146 : // --------------------------------------------------------------------------
147 : // NewSpace
148 :
149 : bool NewSpace::Contains(HeapObject* o) {
150 24 : return MemoryChunk::FromAddress(o->address())->InNewSpace();
151 : }
152 :
153 : bool NewSpace::Contains(Object* o) {
154 : return o->IsHeapObject() && Contains(HeapObject::cast(o));
155 : }
156 :
157 : bool NewSpace::ContainsSlow(Address a) {
158 : return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
159 : }
160 :
161 : bool NewSpace::ToSpaceContainsSlow(Address a) {
162 0 : return to_space_.ContainsSlow(a);
163 : }
164 :
165 : bool NewSpace::FromSpaceContainsSlow(Address a) {
166 : return from_space_.ContainsSlow(a);
167 : }
168 :
169 3128716 : bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
170 : bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
171 :
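// Initializes a page in the new space: tags the chunk as to-space or
// from-space, sets incremental marking page flags, and allocates the local
// tracker (plus a young generation bitmap when the minor mark-compactor is
// enabled).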
172 377998 : Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
173 188999 : SemiSpace* owner) {
174 : DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
175 : bool in_to_space = (owner->id() != kFromSpace);
176 : chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
177 188999 : : MemoryChunk::IN_FROM_SPACE);
178 : DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
179 : : MemoryChunk::IN_TO_SPACE));
180 : Page* page = static_cast<Page*>(chunk);
181 188999 : heap->incremental_marking()->SetNewSpacePageFlags(page);
182 188999 : page->AllocateLocalTracker();
183 188999 : if (FLAG_minor_mc) {
184 : page->AllocateYoungGenerationBitmap();
185 : MarkingState::External(page).ClearLiveness();
186 : }
187 188999 : return page;
188 : }
189 :
190 : // --------------------------------------------------------------------------
191 : // PagedSpace
192 :
193 : template <Page::InitializationMode mode>
194 1011952 : Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
195 : PagedSpace* owner) {
196 : Page* page = reinterpret_cast<Page*>(chunk);
197 : DCHECK(page->area_size() <= kAllocatableMemory);
198 : DCHECK(chunk->owner() == owner);
199 :
200 668 : owner->IncreaseCapacity(page->area_size());
201 505976 : heap->incremental_marking()->SetOldSpacePageFlags(chunk);
202 :
203 : // Make sure that categories are initialized before freeing the area.
204 : page->InitializeFreeListCategories();
205 : // If we do not free the memory, we effectively account for the whole page
206 : // as allocated memory that cannot be used for further allocations.
207 : if (mode == kFreeMemory) {
208 : owner->Free(page->area_start(), page->area_size());
209 : }
210 :
211 505976 : return page;
212 : }
213 :
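// Converts a new-space page into an old-space page in place: the page is
// re-owned by the old space, its flags are cleared, and it is reinitialized
// without freeing its memory, so the objects on it stay where they are.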
214 668 : Page* Page::ConvertNewToOld(Page* old_page) {
215 : DCHECK(!old_page->is_anchor());
216 : DCHECK(old_page->InNewSpace());
217 2004 : OldSpace* old_space = old_page->heap()->old_space();
218 : old_page->set_owner(old_space);
219 : old_page->SetFlags(0, static_cast<uintptr_t>(~0));
220 : old_space->AccountCommitted(old_page->size());
221 : Page* new_page = Page::Initialize<kDoNotFreeMemory>(
222 668 : old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
223 668 : new_page->InsertAfter(old_space->anchor()->prev_page());
224 668 : return new_page;
225 : }
226 :
227 : void Page::InitializeFreeListCategories() {
228 3036378 : for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
229 3036378 : categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
230 : }
231 : }
232 :
233 5663003 : bool PagedSpace::Contains(Address addr) {
234 5663003 : return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
235 : }
236 :
237 3129856 : bool PagedSpace::Contains(Object* o) {
238 3129856 : if (!o->IsHeapObject()) return false;
239 3129856 : Page* p = Page::FromAddress(HeapObject::cast(o)->address());
240 3129856 : if (!Page::IsValid(p)) return false;
241 3129856 : return p->owner() == this;
242 : }
243 :
244 : void PagedSpace::UnlinkFreeListCategories(Page* page) {
245 : DCHECK_EQ(this, page->owner());
246 211308 : page->ForAllFreeListCategories([this](FreeListCategory* category) {
247 : DCHECK_EQ(free_list(), category->owner());
248 211308 : free_list()->RemoveCategory(category);
249 211308 : });
250 : }
251 :
252 : intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
253 : DCHECK_EQ(this, page->owner());
254 514870 : intptr_t added = 0;
255 3089220 : page->ForAllFreeListCategories([&added](FreeListCategory* category) {
256 3089220 : added += category->available();
257 3089220 : category->Relink();
258 514870 : });
259 : DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
260 479652 : return added;
261 : }
262 :
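// Maps an arbitrary (possibly interior) address to the MemoryChunk containing
// it, falling back to a large object space lookup when the page-aligned
// candidate is not a regular page header (e.g. for addresses inside large
// objects).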
263 342793882 : MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
264 : MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
265 289662425 : uintptr_t offset = addr - chunk->address();
266 579270340 : if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
267 53131457 : chunk = heap->lo_space()->FindPageThreadSafe(addr);
268 : }
269 289662426 : return chunk;
270 : }
271 :
272 : Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
273 283999428 : return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
274 : }
275 :
276 : void Page::MarkNeverAllocateForTesting() {
277 : DCHECK(this->owner()->identity() != NEW_SPACE);
278 : DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
279 : SetFlag(NEVER_ALLOCATE_ON_PAGE);
280 : reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
281 : }
282 :
283 9700 : void Page::MarkEvacuationCandidate() {
284 : DCHECK(!IsFlagSet(NEVER_EVACUATE));
285 : DCHECK_NULL(slot_set<OLD_TO_OLD>());
286 : DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
287 : SetFlag(EVACUATION_CANDIDATE);
288 9700 : reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
289 9700 : }
290 :
291 : void Page::ClearEvacuationCandidate() {
292 : if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
293 : DCHECK_NULL(slot_set<OLD_TO_OLD>());
294 : DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
295 : }
296 : ClearFlag(EVACUATION_CANDIDATE);
297 : InitializeFreeListCategories();
298 : }
299 :
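// Iterates over all chunks of the old, map, code and large object spaces, in
// that order.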
300 1225370 : MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
301 : : heap_(heap),
302 : state_(kOldSpaceState),
303 : old_iterator_(heap->old_space()->begin()),
304 : code_iterator_(heap->code_space()->begin()),
305 : map_iterator_(heap->map_space()->begin()),
306 1470444 : lo_iterator_(heap->lo_space()->begin()) {}
307 :
308 2629318 : MemoryChunk* MemoryChunkIterator::next() {
309 2629318 : switch (state_) {
310 : case kOldSpaceState: {
311 4239634 : if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
312 245074 : state_ = kMapState;
313 : // Fall through.
314 : }
315 : case kMapState: {
316 1508256 : if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
317 245074 : state_ = kCodeState;
318 : // Fall through.
319 : }
320 : case kCodeState: {
321 2387804 : if (code_iterator_ != heap_->code_space()->end())
322 948828 : return *(code_iterator_++);
323 245074 : state_ = kLargeObjectState;
324 : // Fall through.
325 : }
326 : case kLargeObjectState: {
327 270708 : if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
328 245074 : state_ = kFinishedState;
329 : // Fall through.
330 : }
331 : case kFinishedState:
332 : return nullptr;
333 : default:
334 : break;
335 : }
336 0 : UNREACHABLE();
337 : return nullptr;
338 : }
339 :
340 : Page* FreeListCategory::page() {
341 : return Page::FromAddress(reinterpret_cast<Address>(this));
342 : }
343 :
344 : FreeList* FreeListCategory::owner() {
345 : return reinterpret_cast<PagedSpace*>(
346 4231493 : Page::FromAddress(reinterpret_cast<Address>(this))->owner())
347 : ->free_list();
348 : }
349 :
350 0 : bool FreeListCategory::is_linked() {
351 0 : return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
352 : }
353 :
354 : // Try linear allocation in the page of alloc_info's allocation top. Does
355 : // not contain slow case logic (e.g. move to the next page or try free list
356 : // allocation) so it can be used by all the allocation functions and for all
357 : // the paged spaces.
358 : HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
359 700601324 : Address current_top = allocation_info_.top();
360 350300662 : Address new_top = current_top + size_in_bytes;
361 350300662 : if (new_top > allocation_info_.limit()) return NULL;
362 :
363 : allocation_info_.set_top(new_top);
364 348069499 : return HeapObject::FromAddress(current_top);
365 : }
366 :
367 :
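// Bump-pointer allocation within the buffer; a filler is inserted in front of
// the object when alignment requires it, and Retry is returned once the
// buffer is exhausted.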
368 15102200 : AllocationResult LocalAllocationBuffer::AllocateRawAligned(
369 : int size_in_bytes, AllocationAlignment alignment) {
370 30218395 : Address current_top = allocation_info_.top();
371 15102200 : int filler_size = Heap::GetFillToAlign(current_top, alignment);
372 :
373 15116195 : Address new_top = current_top + filler_size + size_in_bytes;
374 15116195 : if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
375 :
376 : allocation_info_.set_top(new_top);
377 14943180 : if (filler_size > 0) {
378 : return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
379 0 : filler_size);
380 : }
381 :
382 14943180 : return AllocationResult(HeapObject::FromAddress(current_top));
383 : }
384 :
385 :
386 : HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
387 : AllocationAlignment alignment) {
388 : Address current_top = allocation_info_.top();
389 : int filler_size = Heap::GetFillToAlign(current_top, alignment);
390 :
391 : Address new_top = current_top + filler_size + *size_in_bytes;
392 : if (new_top > allocation_info_.limit()) return NULL;
393 :
394 : allocation_info_.set_top(new_top);
395 : if (filler_size > 0) {
396 : *size_in_bytes += filler_size;
397 : return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
398 : filler_size);
399 : }
400 :
401 : return HeapObject::FromAddress(current_top);
402 : }
403 :
404 :
405 : // Raw allocation.
406 350300662 : AllocationResult PagedSpace::AllocateRawUnaligned(
407 : int size_in_bytes, UpdateSkipList update_skip_list) {
408 : HeapObject* object = AllocateLinearly(size_in_bytes);
409 :
410 350300662 : if (object == NULL) {
411 2233268 : object = free_list_.Allocate(size_in_bytes);
412 2233264 : if (object == NULL) {
413 555185 : object = SlowAllocateRaw(size_in_bytes);
414 : }
415 300177419 : if (object != NULL && heap()->incremental_marking()->black_allocation()) {
416 1070 : Address start = object->address();
417 1070 : Address end = object->address() + size_in_bytes;
418 1070 : Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
419 : }
420 : }
421 :
422 350302201 : if (object != NULL) {
423 648234087 : if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
424 2738996 : SkipList::Update(object->address(), size_in_bytes);
425 : }
426 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
427 350296842 : return object;
428 : }
429 :
430 : return AllocationResult::Retry(identity());
431 : }
432 :
433 :
434 : AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
435 : int size_in_bytes) {
436 : base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
437 : return AllocateRawUnaligned(size_in_bytes);
438 : }
439 :
440 :
441 : // Raw allocation.
442 : AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
443 : AllocationAlignment alignment) {
444 : DCHECK(identity() == OLD_SPACE);
445 : int allocation_size = size_in_bytes;
446 : HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
447 :
448 : if (object == NULL) {
449 : // We don't know exactly how much filler we need to align until space is
450 : // allocated, so assume the worst case.
451 : int filler_size = Heap::GetMaximumFillToAlign(alignment);
452 : allocation_size += filler_size;
453 : object = free_list_.Allocate(allocation_size);
454 : if (object == NULL) {
455 : object = SlowAllocateRaw(allocation_size);
456 : }
457 : if (object != NULL) {
458 : if (heap()->incremental_marking()->black_allocation()) {
459 : Address start = object->address();
460 : Address end = object->address() + allocation_size;
461 : Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
462 : }
463 : if (filler_size != 0) {
464 : object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
465 : alignment);
466 : // Filler objects are initialized, so mark only the aligned object
467 : // memory as uninitialized.
468 : allocation_size = size_in_bytes;
469 : }
470 : }
471 : }
472 :
473 : if (object != NULL) {
474 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
475 : return object;
476 : }
477 :
478 : return AllocationResult::Retry(identity());
479 : }
480 :
481 :
482 263492422 : AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
483 : AllocationAlignment alignment) {
484 : #ifdef V8_HOST_ARCH_32_BIT
485 : AllocationResult result =
486 : alignment == kDoubleAligned
487 : ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
488 : : AllocateRawUnaligned(size_in_bytes);
489 : #else
490 263492422 : AllocationResult result = AllocateRawUnaligned(size_in_bytes);
491 : #endif
492 : HeapObject* heap_obj = nullptr;
493 526985978 : if (!result.IsRetry() && result.To(&heap_obj)) {
494 263492398 : AllocationStep(heap_obj->address(), size_in_bytes);
495 : }
496 263520143 : return result;
497 : }
498 :
499 :
500 : // -----------------------------------------------------------------------------
501 : // NewSpace
502 :
503 :
504 : AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
505 : AllocationAlignment alignment) {
506 : Address top = allocation_info_.top();
507 : int filler_size = Heap::GetFillToAlign(top, alignment);
508 : int aligned_size_in_bytes = size_in_bytes + filler_size;
509 :
510 : if (allocation_info_.limit() - top < aligned_size_in_bytes) {
511 : // See if we can create room.
512 : if (!EnsureAllocation(size_in_bytes, alignment)) {
513 : return AllocationResult::Retry();
514 : }
515 :
516 : top = allocation_info_.top();
517 : filler_size = Heap::GetFillToAlign(top, alignment);
518 : aligned_size_in_bytes = size_in_bytes + filler_size;
519 : }
520 :
521 : HeapObject* obj = HeapObject::FromAddress(top);
522 : allocation_info_.set_top(top + aligned_size_in_bytes);
523 : DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
524 :
525 : if (filler_size > 0) {
526 : obj = heap()->PrecedeWithFiller(obj, filler_size);
527 : }
528 :
529 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
530 :
531 : return obj;
532 : }
533 :
534 :
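// Bump-pointer allocation in to-space; if the current allocation area is too
// small, EnsureAllocation tries to make room, and Retry is returned on
// failure.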
535 : AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
536 425053844 : Address top = allocation_info_.top();
537 424815006 : if (allocation_info_.limit() < top + size_in_bytes) {
538 : // See if we can create room.
539 306614 : if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
540 67776 : return AllocationResult::Retry();
541 : }
542 :
543 : top = allocation_info_.top();
544 : }
545 :
546 424747230 : HeapObject* obj = HeapObject::FromAddress(top);
547 424747238 : allocation_info_.set_top(top + size_in_bytes);
548 : DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
549 :
550 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
551 :
552 424747238 : return obj;
553 : }
554 :
555 :
556 : AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
557 : AllocationAlignment alignment) {
558 : #ifdef V8_HOST_ARCH_32_BIT
559 : return alignment == kDoubleAligned
560 : ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
561 : : AllocateRawUnaligned(size_in_bytes);
562 : #else
563 : return AllocateRawUnaligned(size_in_bytes);
564 : #endif
565 : }
566 :
567 :
568 487360 : MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
569 : int size_in_bytes, AllocationAlignment alignment) {
570 487360 : base::LockGuard<base::Mutex> guard(&mutex_);
571 487458 : return AllocateRaw(size_in_bytes, alignment);
572 : }
573 :
574 132228 : LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
575 : Executability executable, Space* owner) {
576 21465 : if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
577 : STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
578 0 : FATAL("Code page is too large.");
579 : }
580 20902 : heap->incremental_marking()->SetOldSpacePageFlags(chunk);
581 :
582 : MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
583 :
584 : // Initialize the owner field for each contained page (except the first, which
585 : // is initialized by MemoryChunk::Initialize).
586 179722 : for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
587 : addr < chunk->area_end(); addr += Page::kPageSize) {
588 : // Clear out kPageHeaderTag.
589 68959 : Memory::Address_at(addr) = 0;
590 : }
591 :
592 20902 : return static_cast<LargePage*>(chunk);
593 : }
594 :
595 142131 : size_t LargeObjectSpace::Available() {
596 284262 : return ObjectSizeFor(heap()->memory_allocator()->Available());
597 : }
598 :
599 :
600 : LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
601 59260 : return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
602 : }
603 :
604 :
605 207839 : LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
606 : AllocationResult result,
607 : intptr_t size) {
608 207839 : if (result.IsRetry()) return InvalidBuffer();
609 : HeapObject* obj = nullptr;
610 : bool ok = result.To(&obj);
611 : USE(ok);
612 : DCHECK(ok);
613 207839 : Address top = HeapObject::cast(obj)->address();
614 415678 : return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
615 : }
616 :
617 :
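// Merges |other| into this buffer when the two are contiguous (this buffer's
// top equals the other's limit); on success the other buffer is reset.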
618 : bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
619 207829 : if (allocation_info_.top() == other->allocation_info_.limit()) {
620 : allocation_info_.set_top(other->allocation_info_.top());
621 : other->allocation_info_.Reset(nullptr, nullptr);
622 : return true;
623 : }
624 : return false;
625 : }
626 :
627 : } // namespace internal
628 : } // namespace v8
629 :
630 : #endif // V8_HEAP_SPACES_INL_H_