// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"

#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/msan.h"
#include "src/objects/code-inl.h"

namespace v8 {
namespace internal {

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
  p_ = p_->next_page();
  return *this;
}

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
  PageIteratorImpl<PAGE_TYPE> tmp(*this);
  operator++();
  return tmp;
}

PageRange::PageRange(Address start, Address limit)
    : begin_(Page::FromAddress(start)),
      end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
#ifdef DEBUG
  if (begin_->InNewSpace()) {
    SemiSpace::AssertValidRange(start, limit);
  }
#endif  // DEBUG
}

// -----------------------------------------------------------------------------
// SemiSpaceIterator

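// Walks live objects in a semispace. When current_ hits a page boundary the
// cursor hops to the object area of the next page; filler objects are skipped
// so callers only see real objects. A null HeapObject signals exhaustion.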
HeapObject SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (Page::IsAlignedToPageSize(current_)) {
      Page* page = Page::FromAllocationAreaAddress(current_);
      page = page->next_page();
      DCHECK(page);
      current_ = page->area_start();
      if (current_ == limit_) return HeapObject();
    }
    HeapObject object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return HeapObject();
}

// -----------------------------------------------------------------------------
// HeapObjectIterator

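// Returns the next live object on the current page, advancing page by page
// until one yields an object; a null HeapObject means the whole space has
// been iterated.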
HeapObject HeapObjectIterator::Next() {
  do {
    HeapObject next_obj = FromCurrentPage();
    if (!next_obj.is_null()) return next_obj;
  } while (AdvanceToNextPage());
  return HeapObject();
}

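// Linear scan of the current page. The space's linear allocation area
// [top, limit) holds no initialized objects yet, so the cursor jumps over it;
// fillers are skipped, and object sizes are checked against per-space limits
// in debug builds.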
HeapObject HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject obj = HeapObject::FromAddress(cur_addr_);
    const int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK_LE(cur_addr_, cur_end_);
    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return HeapObject();
}

void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                               size_t amount) {
  base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
  heap()->IncrementExternalBackingStoreBytes(type, amount);
}

void Space::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                               size_t amount) {
  base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
  heap()->DecrementExternalBackingStoreBytes(type, amount);
}

void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
                                          Space* from, Space* to,
                                          size_t amount) {
  if (from == to) return;

  base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
  base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
}

// -----------------------------------------------------------------------------
// SemiSpace

bool SemiSpace::Contains(HeapObject o) {
  MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
  if (memory_chunk->IsLargePage()) return false;
  return id_ == kToSpace ? memory_chunk->IsToPage()
                         : memory_chunk->IsFromPage();
}

bool SemiSpace::Contains(Object o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool SemiSpace::ContainsSlow(Address a) {
  for (Page* p : *this) {
    if (p == MemoryChunk::FromAddress(a)) return true;
  }
  return false;
}

// --------------------------------------------------------------------------
// NewSpace

bool NewSpace::Contains(Object o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool NewSpace::Contains(HeapObject o) {
  return MemoryChunk::FromHeapObject(o)->InNewSpace();
}

bool NewSpace::ContainsSlow(Address a) {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContainsSlow(Address a) {
  return to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContains(Object o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object o) { return from_space_.Contains(o); }

bool PagedSpace::Contains(Address addr) {
  return MemoryChunk::FromAnyPointerAddress(addr)->owner() == this;
}

bool PagedSpace::Contains(Object o) {
  if (!o.IsHeapObject()) return false;
  return Page::FromAddress(o.ptr())->owner() == this;
}

void PagedSpace::UnlinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  page->ForAllFreeListCategories([this](FreeListCategory* category) {
    DCHECK_EQ(free_list(), category->owner());
    category->set_free_list(nullptr);
    free_list()->RemoveCategory(category);
  });
}

size_t PagedSpace::RelinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  size_t added = 0;
  page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
    category->set_free_list(&free_list_);
    added += category->available();
    category->Relink();
  });
  DCHECK_EQ(page->AvailableInFreeList(),
            page->AvailableInFreeListFromAllocatedBytes());
  return added;
}

bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
  if (allocation_info_.top() != kNullAddress) {
    const Address object_address = object->address();
    if ((allocation_info_.top() - object_size) == object_address) {
      allocation_info_.set_top(object_address);
      return true;
    }
  }
  return false;
}

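// The chunk header stores, at kHeaderSentinelOffset, a sentinel word that
// (for a real chunk) tags the chunk's own base address. An aligned candidate
// address is therefore a genuine chunk base only if that slot points back at
// the base; the load is relaxed-atomic since other threads may touch
// neighboring header words.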
bool MemoryChunk::HasHeaderSentinel(Address slot_addr) {
  Address base = BaseAddress(slot_addr);
  if (slot_addr < base + kHeaderSize) return false;
  return HeapObject::FromAddress(base) ==
         ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
}

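// Recovers the owning MemoryChunk from an arbitrary interior pointer. Objects
// on large pages can span many page-aligned regions, so the loop walks
// backwards one aligned region at a time until it finds a header sentinel.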
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
  while (!HasHeaderSentinel(addr)) {
    addr = BaseAddress(addr) - 1;
  }
  return FromAddress(addr);
}

void MemoryChunk::IncrementExternalBackingStoreBytes(
    ExternalBackingStoreType type, size_t amount) {
  base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
  owner()->IncrementExternalBackingStoreBytes(type, amount);
}

void MemoryChunk::DecrementExternalBackingStoreBytes(
    ExternalBackingStoreType type, size_t amount) {
  base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
  owner()->DecrementExternalBackingStoreBytes(type, amount);
}

void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                MemoryChunk* from,
                                                MemoryChunk* to,
                                                size_t amount) {
  base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
  base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
  Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
                                       amount);
}

void Page::MarkNeverAllocateForTesting() {
  DCHECK(this->owner()->identity() != NEW_SPACE);
  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
  SetFlag(NEVER_ALLOCATE_ON_PAGE);
  SetFlag(NEVER_EVACUATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::MarkEvacuationCandidate() {
  DCHECK(!IsFlagSet(NEVER_EVACUATE));
  DCHECK_NULL(slot_set<OLD_TO_OLD>());
  DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
  SetFlag(EVACUATION_CANDIDATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::ClearEvacuationCandidate() {
  if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
    DCHECK_NULL(slot_set<OLD_TO_OLD>());
    DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
  }
  ClearFlag(EVACUATION_CANDIDATE);
  InitializeFreeListCategories();
}

HeapObject LargePage::GetObject() {
  return HeapObject::FromAddress(area_start());
}

OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
    : heap_(heap),
      state_(kOldSpaceState),
      old_iterator_(heap->old_space()->begin()),
      code_iterator_(heap->code_space()->begin()),
      map_iterator_(heap->map_space()->begin()),
      lo_iterator_(heap->lo_space()->begin()),
      code_lo_iterator_(heap->code_lo_space()->begin()) {}

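// Iterates all old-generation chunks in a fixed order: old space, map space,
// code space, then the regular and code large-object spaces. The deliberate
// switch fallthrough lets an exhausted iterator hand over to the next space;
// nullptr marks the end of the sequence.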
MemoryChunk* OldGenerationMemoryChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
      state_ = kMapState;
      V8_FALLTHROUGH;
    }
    case kMapState: {
      if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
      state_ = kCodeState;
      V8_FALLTHROUGH;
    }
    case kCodeState: {
      if (code_iterator_ != heap_->code_space()->end())
        return *(code_iterator_++);
      state_ = kLargeObjectState;
      V8_FALLTHROUGH;
    }
    case kLargeObjectState: {
      if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
      state_ = kCodeLargeObjectState;
      V8_FALLTHROUGH;
    }
    case kCodeLargeObjectState: {
      if (code_lo_iterator_ != heap_->code_lo_space()->end())
        return *(code_lo_iterator_++);
      state_ = kFinishedState;
      V8_FALLTHROUGH;
    }
    case kFinishedState:
      return nullptr;
    default:
      break;
  }
  UNREACHABLE();
}

Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
  return top(type) ? top(type)->page() : nullptr;
}

FreeList* FreeListCategory::owner() { return free_list_; }

bool FreeListCategory::is_linked() {
  return prev_ != nullptr || next_ != nullptr;
}

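// Bump-pointer allocation inside a thread-local buffer: compute the filler
// needed for the requested alignment, return Retry() if the padded request
// overflows the buffer, otherwise advance top. Any alignment gap becomes a
// filler object placed in front of the returned object.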
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}

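// Fast path check for paged-space allocation: succeed immediately when the
// current linear allocation area can hold size_in_bytes, otherwise defer to
// SlowRefillLinearAllocationArea() to set up a fresh one.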
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
  if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
    return true;
  }
  return SlowRefillLinearAllocationArea(size_in_bytes);
}

HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  DCHECK_LE(new_top, allocation_info_.limit());
  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}

HeapObject PagedSpace::TryAllocateLinearlyAligned(
    int* size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return HeapObject();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}

AllocationResult PagedSpace::AllocateRawUnaligned(
    int size_in_bytes, UpdateSkipList update_skip_list) {
  DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
  if (!EnsureLinearAllocationArea(size_in_bytes)) {
    return AllocationResult::Retry(identity());
  }
  HeapObject object = AllocateLinearly(size_in_bytes);
  DCHECK(!object.is_null());
  if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
    SkipList::Update(object->address(), size_in_bytes);
  }
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
  return object;
}

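// Aligned allocation first tries the current linear area. If that fails, the
// exact filler cannot be known until the new top is fixed, so the refill
// conservatively reserves the worst-case filler (GetMaximumFillToAlign); the
// retry with the original size is then guaranteed to fit.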
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
  DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
  int allocation_size = size_in_bytes;
  HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
  if (object.is_null()) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    if (!EnsureLinearAllocationArea(allocation_size)) {
      return AllocationResult::Retry(identity());
    }
    allocation_size = size_in_bytes;
    object = TryAllocateLinearlyAligned(&allocation_size, alignment);
    DCHECK(!object.is_null());
  }
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
  return object;
}

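// Main raw-allocation entry point for paged spaces. Generated code may lower
// top() for folded allocations, so top_on_previous_step_ is clamped first to
// keep the bytes-since-last-step accounting non-negative. Alignment requests
// are only honored on 32-bit hosts; on 64-bit hosts the unaligned path is
// used, word alignment being sufficient there.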
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
  if (top_on_previous_step_ && top() < top_on_previous_step_ &&
      SupportsInlineAllocation()) {
    // Generated code decreased the top() pointer to do folded allocations.
    // The top_on_previous_step_ can be one byte beyond the current page.
    DCHECK_NE(top(), kNullAddress);
    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
              Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
    top_on_previous_step_ = top();
  }
  size_t bytes_since_last =
      top_on_previous_step_ ? top() - top_on_previous_step_ : 0;

  DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result = alignment != kWordAligned
                                ? AllocateRawAligned(size_in_bytes, alignment)
                                : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject heap_obj;
  if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
    AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
                   heap_obj->address(), size_in_bytes);
    StartNextInlineAllocationStep();
    DCHECK_IMPLIES(
        heap()->incremental_marking()->black_allocation(),
        heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
  }
  return result;
}

// -----------------------------------------------------------------------------
// NewSpace

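// New-space allocation bumps top within the active semispace. When the padded
// request does not fit, EnsureAllocation() tries to make room (for example by
// moving on to a fresh page); the filler size is then recomputed because top
// may have moved.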
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top <
      static_cast<uintptr_t>(aligned_size_in_bytes)) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
  if (top() < top_on_previous_step_) {
    // Generated code decreased the top() pointer to do folded allocations.
    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
              Page::FromAllocationAreaAddress(top_on_previous_step_));
    top_on_previous_step_ = top();
  }
#ifdef V8_HOST_ARCH_32_BIT
  return alignment != kWordAligned
             ? AllocateRawAligned(size_in_bytes, alignment)
             : AllocateRawUnaligned(size_in_bytes);
#else
#ifdef V8_COMPRESS_POINTERS
  // TODO(ishell, v8:8875): Consider using aligned allocations once the
  // allocation alignment inconsistency is fixed. For now we keep using
  // unaligned access since both x64 and arm64 architectures (where pointer
  // compression is supported) allow unaligned access to doubles and full words.
#endif  // V8_COMPRESS_POINTERS
  return AllocateRawUnaligned(size_in_bytes);
#endif
}

V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::MutexGuard guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}

LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject obj;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, LinearAllocationArea(top, top + size));
}

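// Buffers merge only when adjacent in memory: this buffer's top must coincide
// with the other buffer's limit. On success, top is lowered to the other's
// top (absorbing its unused tail) and the other buffer is reset to an invalid
// range.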
bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(kNullAddress, kNullAddress);
    return true;
  }
  return false;
}

bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
  if (IsValid()) {
    const Address object_address = object->address();
    if ((allocation_info_.top() - object_size) == object_address) {
      allocation_info_.set_top(object_address);
      return true;
    }
  }
  return false;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_