// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"

#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/msan.h"
#include "src/objects/code-inl.h"

namespace v8 {
namespace internal {

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
  p_ = p_->next_page();
  return *this;
}

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
  PageIteratorImpl<PAGE_TYPE> tmp(*this);
  operator++();
  return tmp;
}

PageRange::PageRange(Address start, Address limit)
    : begin_(Page::FromAddress(start)),
      end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
#ifdef DEBUG
  if (begin_->InNewSpace()) {
    SemiSpace::AssertValidRange(start, limit);
  }
#endif  // DEBUG
}

// -----------------------------------------------------------------------------
// SemiSpaceIterator

HeapObject SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (Page::IsAlignedToPageSize(current_)) {
      Page* page = Page::FromAllocationAreaAddress(current_);
      page = page->next_page();
      DCHECK(page);
      current_ = page->area_start();
      if (current_ == limit_) return HeapObject();
    }
    HeapObject object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return HeapObject();
}
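
// Note: an address that is page-size-aligned marks the end of the previous
// page's usable area rather than a valid object start, so the iterator hops
// to the next page's area_start() before decoding another object. Fillers
// left behind by alignment or trimming are skipped; a null HeapObject()
// signals exhaustion.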

// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject HeapObjectIterator::Next() {
  do {
    HeapObject next_obj = FromCurrentPage();
    if (!next_obj.is_null()) return next_obj;
  } while (AdvanceToNextPage());
  return HeapObject();
}

HeapObject HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject obj = HeapObject::FromAddress(cur_addr_);
    const int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK_LE(cur_addr_, cur_end_);
    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return HeapObject();
}
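
// Note: the bytes between the space's top() and limit() form the current
// linear allocation area and contain uninitialized memory, so the iterator
// jumps from top() straight to limit() instead of trying to parse objects
// out of that gap.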

void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                               size_t amount) {
  base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
  heap()->IncrementExternalBackingStoreBytes(type, amount);
}

void Space::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                               size_t amount) {
  base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
  heap()->DecrementExternalBackingStoreBytes(type, amount);
}

void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
                                          Space* from, Space* to,
                                          size_t amount) {
  if (from == to) return;

  base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
  base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
}

// -----------------------------------------------------------------------------
// SemiSpace

bool SemiSpace::Contains(HeapObject o) {
  MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
  if (memory_chunk->IsLargePage()) return false;
  return id_ == kToSpace ? memory_chunk->IsToPage()
                         : memory_chunk->IsFromPage();
}

bool SemiSpace::Contains(Object o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool SemiSpace::ContainsSlow(Address a) {
  for (Page* p : *this) {
    if (p == MemoryChunk::FromAddress(a)) return true;
  }
  return false;
}

// --------------------------------------------------------------------------
// NewSpace

bool NewSpace::Contains(Object o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool NewSpace::Contains(HeapObject o) {
  return MemoryChunk::FromHeapObject(o)->InNewSpace();
}

bool NewSpace::ContainsSlow(Address a) {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContainsSlow(Address a) {
  return to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContains(Object o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object o) { return from_space_.Contains(o); }

bool PagedSpace::Contains(Address addr) {
  return MemoryChunk::FromAnyPointerAddress(addr)->owner() == this;
}

bool PagedSpace::Contains(Object o) {
  if (!o.IsHeapObject()) return false;
  return Page::FromAddress(o.ptr())->owner() == this;
}

void PagedSpace::UnlinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  page->ForAllFreeListCategories([this](FreeListCategory* category) {
    DCHECK_EQ(free_list(), category->owner());
    category->set_free_list(nullptr);
    free_list()->RemoveCategory(category);
  });
}

size_t PagedSpace::RelinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  size_t added = 0;
  page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
    category->set_free_list(&free_list_);
    added += category->available();
    category->Relink();
  });

  DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
                 page->AvailableInFreeList() ==
                     page->AvailableInFreeListFromAllocatedBytes());
  return added;
}

bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
  if (allocation_info_.top() != kNullAddress) {
    const Address object_address = object->address();
    if ((allocation_info_.top() - object_size) == object_address) {
      allocation_info_.set_top(object_address);
      return true;
    }
  }
  return false;
}
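
// Note: TryFreeLast() undoes the most recent bump-pointer allocation. It can
// only succeed when |object| ends exactly at the current top; lowering top to
// the object's start hands its bytes back to the linear allocation area.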

bool MemoryChunk::HasHeaderSentinel(Address slot_addr) {
  Address base = BaseAddress(slot_addr);
  if (slot_addr < base + kHeaderSize) return false;
  return HeapObject::FromAddress(base) ==
         ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
}

MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
  while (!HasHeaderSentinel(addr)) {
    addr = BaseAddress(addr) - 1;
  }
  return FromAddress(addr);
}
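
// Note: the sentinel slot of a genuine chunk header holds the chunk's own
// base address. A large page can span several chunk-aligned regions, but
// only the first one carries a header, so for an interior pointer the
// sentinel check fails and the loop steps back one aligned region at a time
// until it reaches the owning header. This is what makes
// FromAnyPointerAddress() safe for arbitrary interior addresses, unlike
// plain FromAddress().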

void MemoryChunk::IncrementExternalBackingStoreBytes(
    ExternalBackingStoreType type, size_t amount) {
  base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
  owner()->IncrementExternalBackingStoreBytes(type, amount);
}

void MemoryChunk::DecrementExternalBackingStoreBytes(
    ExternalBackingStoreType type, size_t amount) {
  base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
  owner()->DecrementExternalBackingStoreBytes(type, amount);
}

void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                MemoryChunk* from,
                                                MemoryChunk* to,
                                                size_t amount) {
  base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
  base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
  Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
                                       amount);
}

void Page::MarkNeverAllocateForTesting() {
  DCHECK(this->owner()->identity() != NEW_SPACE);
  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
  SetFlag(NEVER_ALLOCATE_ON_PAGE);
  SetFlag(NEVER_EVACUATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::MarkEvacuationCandidate() {
  DCHECK(!IsFlagSet(NEVER_EVACUATE));
  DCHECK_NULL(slot_set<OLD_TO_OLD>());
  DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
  SetFlag(EVACUATION_CANDIDATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::ClearEvacuationCandidate() {
  if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
    DCHECK_NULL(slot_set<OLD_TO_OLD>());
    DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
  }
  ClearFlag(EVACUATION_CANDIDATE);
  InitializeFreeListCategories();
}

HeapObject LargePage::GetObject() {
  return HeapObject::FromAddress(area_start());
}

OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
    : heap_(heap),
      state_(kOldSpaceState),
      old_iterator_(heap->old_space()->begin()),
      code_iterator_(heap->code_space()->begin()),
      map_iterator_(heap->map_space()->begin()),
      lo_iterator_(heap->lo_space()->begin()),
      code_lo_iterator_(heap->code_lo_space()->begin()) {}

MemoryChunk* OldGenerationMemoryChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
      state_ = kMapState;
      V8_FALLTHROUGH;
    }
    case kMapState: {
      if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
      state_ = kCodeState;
      V8_FALLTHROUGH;
    }
    case kCodeState: {
      if (code_iterator_ != heap_->code_space()->end())
        return *(code_iterator_++);
      state_ = kLargeObjectState;
      V8_FALLTHROUGH;
    }
    case kLargeObjectState: {
      if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
      state_ = kCodeLargeObjectState;
      V8_FALLTHROUGH;
    }
    case kCodeLargeObjectState: {
      if (code_lo_iterator_ != heap_->code_lo_space()->end())
        return *(code_lo_iterator_++);
      state_ = kFinishedState;
      V8_FALLTHROUGH;
    }
    case kFinishedState:
      return nullptr;
    default:
      break;
  }
  UNREACHABLE();
}
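
// Note: next() is a resumable state machine. Each call drains the current
// space's page iterator; when a space is exhausted the state advances and
// V8_FALLTHROUGH drops into the following case, so a single call can cross
// several empty spaces. Once kFinishedState is reached, every further call
// returns nullptr. Typical usage (sketch):
//
//   OldGenerationMemoryChunkIterator it(heap);
//   while (MemoryChunk* chunk = it.next()) { /* visit chunk */ }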

Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
  return top(type) ? top(type)->page() : nullptr;
}

FreeList* FreeListCategory::owner() { return free_list_; }

bool FreeListCategory::is_linked() {
  return prev_ != nullptr || next_ != nullptr;
}

AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}
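
// Note: this is plain bump-pointer allocation with optional alignment. When
// |current_top| is misaligned, GetFillToAlign() returns the number of pad
// bytes required; PrecedeWithFiller() then writes a filler object into the
// pad and returns the properly aligned object that follows it.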

bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
  if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
    return true;
  }
  return SlowRefillLinearAllocationArea(size_in_bytes);
}

HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  DCHECK_LE(new_top, allocation_info_.limit());
  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}

HeapObject PagedSpace::TryAllocateLinearlyAligned(
    int* size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return HeapObject();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}

AllocationResult PagedSpace::AllocateRawUnaligned(
    int size_in_bytes, UpdateSkipList update_skip_list) {
  DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
  if (!EnsureLinearAllocationArea(size_in_bytes)) {
    return AllocationResult::Retry(identity());
  }
  HeapObject object = AllocateLinearly(size_in_bytes);
  DCHECK(!object.is_null());
  if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
    SkipList::Update(object->address(), size_in_bytes);
  }
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
  return object;
}


AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
  DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
  int allocation_size = size_in_bytes;
  HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
  if (object.is_null()) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    if (!EnsureLinearAllocationArea(allocation_size)) {
      return AllocationResult::Retry(identity());
    }
    allocation_size = size_in_bytes;
    object = TryAllocateLinearlyAligned(&allocation_size, alignment);
    DCHECK(!object.is_null());
  }
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
  return object;
}
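
// Note: the aligned path may consume up to GetMaximumFillToAlign() extra
// bytes. The first TryAllocateLinearlyAligned() call is opportunistic; if it
// fails, the slow path reserves the worst-case size so that the retry with
// the original size is guaranteed to fit.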


AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
  if (top_on_previous_step_ && top() < top_on_previous_step_ &&
      SupportsInlineAllocation()) {
    // Generated code decreased the top() pointer to do folded allocations.
    // The top_on_previous_step_ can be one byte beyond the current page.
    DCHECK_NE(top(), kNullAddress);
    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
              Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
    top_on_previous_step_ = top();
  }
  size_t bytes_since_last =
      top_on_previous_step_ ? top() - top_on_previous_step_ : 0;

  DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result = alignment != kWordAligned
                                ? AllocateRawAligned(size_in_bytes, alignment)
                                : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject heap_obj;
  if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
    AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
                   heap_obj->address(), size_in_bytes);
    StartNextInlineAllocationStep();
    DCHECK_IMPLIES(
        heap()->incremental_marking()->black_allocation(),
        heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
  }
  return result;
}
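
// Note: bytes_since_last accounts for memory handed out by generated code via
// inline (bump-pointer) allocation since the last notification. It is folded
// into the AllocationStep() call so that allocation observers (e.g. the
// incremental marker's step scheduling) see every allocated byte, not just
// allocations that went through the runtime.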


// -----------------------------------------------------------------------------
// NewSpace


AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top <
      static_cast<uintptr_t>(aligned_size_in_bytes)) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
  if (top() < top_on_previous_step_) {
    // Generated code decreased the top() pointer to do folded allocations.
    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
              Page::FromAllocationAreaAddress(top_on_previous_step_));
    top_on_previous_step_ = top();
  }
#ifdef V8_HOST_ARCH_32_BIT
  return alignment != kWordAligned
             ? AllocateRawAligned(size_in_bytes, alignment)
             : AllocateRawUnaligned(size_in_bytes);
#else
#ifdef V8_COMPRESS_POINTERS
  // TODO(ishell, v8:8875): Consider using aligned allocations once the
  // allocation alignment inconsistency is fixed. For now we keep using
  // unaligned access since both x64 and arm64 architectures (where pointer
  // compression is supported) allow unaligned access to doubles and full
  // words.
#endif  // V8_COMPRESS_POINTERS
  return AllocateRawUnaligned(size_in_bytes);
#endif
}

V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::MutexGuard guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}
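
// Note: this is the mutex-guarded variant of AllocateRaw(); presumably it
// exists so that allocation paths running off the main thread can share the
// new space without racing on allocation_info_.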

LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject obj;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, LinearAllocationArea(top, top + size));
}


bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(kNullAddress, kNullAddress);
    return true;
  }
  return false;
}
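
// Note: TryMerge() only handles the case where |other|'s unused area ends
// exactly where this buffer's unused area begins; the two regions are then
// coalesced into one and |other| is invalidated. Non-adjacent buffers are
// left untouched and false is returned.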

bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
  if (IsValid()) {
    const Address object_address = object->address();
    if ((allocation_info_.top() - object_size) == object_address) {
      allocation_info_.set_top(object_address);
      return true;
    }
  }
  return false;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_