Line data Source code
1 : // Copyright 2011 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/heap/spaces.h"
6 :
7 : #include <utility>
8 :
9 : #include "src/base/bits.h"
10 : #include "src/base/macros.h"
11 : #include "src/base/platform/semaphore.h"
12 : #include "src/counters.h"
13 : #include "src/heap/array-buffer-tracker.h"
14 : #include "src/heap/concurrent-marking.h"
15 : #include "src/heap/incremental-marking.h"
16 : #include "src/heap/mark-compact.h"
17 : #include "src/heap/slot-set.h"
18 : #include "src/msan.h"
19 : #include "src/objects-inl.h"
20 : #include "src/snapshot/snapshot.h"
21 : #include "src/v8.h"
22 : #include "src/vm-state-inl.h"
23 :
24 : namespace v8 {
25 : namespace internal {
26 :
27 : // ----------------------------------------------------------------------------
28 : // HeapObjectIterator
29 :
// Iterates all objects in |space|, walking its pages in list order: starting
// at the page after the anchor and stopping when the anchor is reached again.
HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
    : cur_addr_(nullptr),
      cur_end_(nullptr),
      space_(space),
      page_range_(space->anchor()->next_page(), space->anchor()),
      current_page_(page_range_.begin()) {}
36 :
// Iterates only the objects on a single |page|. Only valid for pages owned
// by old, map, or code space (checked in debug builds below).
HeapObjectIterator::HeapObjectIterator(Page* page)
    : cur_addr_(nullptr),
      cur_end_(nullptr),
      space_(reinterpret_cast<PagedSpace*>(page->owner())),
      page_range_(page),
      current_page_(page_range_.begin()) {
#ifdef DEBUG
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
#endif  // DEBUG
}
50 :
// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page. Returns false once all pages
// in the range have been visited.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK_EQ(cur_addr_, cur_end_);
  if (current_page_ == page_range_.end()) return false;
  Page* cur_page = *(current_page_++);
  Heap* heap = space_->heap();

  // The page must be fully swept before it can be iterated; a concurrent
  // sweeper task may still own it, so sweep it here or wait for completion.
  heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
      cur_page);
  // Pages left over from a minor mark-compact cycle may need an extra pass
  // before they are iterable.
  if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
    heap->minor_mark_compact_collector()->MakeIterable(
        cur_page, MarkingTreatmentMode::CLEAR,
        FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}
70 :
71 86524 : PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
72 86524 : : heap_(heap) {
73 : AllSpaces spaces(heap_);
74 519144 : for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
75 432620 : space->PauseAllocationObservers();
76 : }
77 86524 : }
78 :
79 86524 : PauseAllocationObserversScope::~PauseAllocationObserversScope() {
80 86524 : AllSpaces spaces(heap_);
81 519144 : for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
82 432620 : space->ResumeAllocationObservers();
83 : }
84 86524 : }
85 :
86 : // -----------------------------------------------------------------------------
87 : // CodeRange
88 :
// Creates an empty code range for |isolate|; the actual virtual-memory
// reservation happens later in SetUp().
CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}
94 :
// Reserves the virtual address range used for code objects. |requested| may
// be 0: targets that require a code range then get the maximal size, others
// get no reservation at all. Returns false if the reservation fails.
bool CodeRange::SetUp(size_t requested) {
  DCHECK(!virtual_memory_.IsReserved());

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  // Clamp small requests up to the minimum supported range size.
  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  // Account for platform-reserved pages at the start of the range, as long
  // as this keeps the total within the maximal size.
  const size_t reserved_area =
      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area))
    requested += reserved_area;

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);

  VirtualMemory reservation;
  if (!AlignedAllocVirtualMemory(
          requested,
          Max(kCodeRangeAreaAlignment,
              static_cast<size_t>(base::OS::AllocateAlignment())),
          base::OS::GetRandomMmapAddr(), &reservation)) {
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(reservation.size() == requested);
  Address base = reinterpret_cast<Address>(reservation.address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (reserved_area > 0) {
    if (!reservation.Commit(base, reserved_area, true)) return false;

    base += reserved_area;
  }
  // The allocatable area starts at the first chunk-aligned address past the
  // reserved pages; the alignment gap is excluded from the usable size.
  Address aligned_base = ::RoundUp(base, MemoryChunk::kAlignment);
  size_t size = reservation.size() - (aligned_base - base) - reserved_area;
  allocation_list_.emplace_back(aligned_base, size);
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", reservation.address(), requested));
  virtual_memory_.TakeControl(&reservation);
  return true;
}
149 :
150 3108 : bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
151 : const FreeBlock& right) {
152 3108 : return left.start < right.start;
153 : }
154 :
155 :
// Advances current_allocation_block_index_ to an allocation block that can
// hold |requested| bytes. If no remaining block is large enough, the free
// list and allocation list are merged, sorted, and coalesced, and the search
// restarts from the beginning. Returns false when the code range is full or
// too fragmented to satisfy the request.
bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.size();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.insert(free_list_.end(), allocation_list_.begin(),
                    allocation_list_.end());
  allocation_list_.clear();
  std::sort(free_list_.begin(), free_list_.end(), &CompareFreeBlockAddress);
  for (size_t i = 0; i < free_list_.size();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.size() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.push_back(merged);
    }
  }
  free_list_.clear();

  // Retry the search over the freshly compacted allocation list.
  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.size();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}
196 :
197 :
// Reserves a block of |requested_size| bytes from the code range and commits
// the first |commit_size| bytes of it. On success returns the block's start
// address and stores the actually reserved size in |allocated|; on failure
// returns nullptr with *allocated == 0.
Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  // request_size includes guards while committed_size does not. Make sure
  // callers know about the invariant.
  CHECK_LE(commit_size,
           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return nullptr;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
          &virtual_memory_, current.start, commit_size, *allocated)) {
    // Commit failed; return the reserved block to the free list.
    *allocated = 0;
    ReleaseBlock(&current);
    return nullptr;
  }
  return current.start;
}
221 :
222 :
223 0 : bool CodeRange::CommitRawMemory(Address start, size_t length) {
224 : return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
225 36927 : EXECUTABLE);
226 : }
227 :
228 :
229 0 : bool CodeRange::UncommitRawMemory(Address start, size_t length) {
230 264 : return virtual_memory_.Uncommit(start, length);
231 : }
232 :
233 :
// Returns [address, address+length) to the free list and uncommits it.
// Thread-safe: guarded by the code-range mutex.
void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.emplace_back(address, length);
  virtual_memory_.Uncommit(address, length);
}
240 :
// Carves a block of at least |requested_size| bytes (rounded up to chunk
// alignment) out of the current allocation block and stores it in |block|.
// Returns false if no sufficiently large block is available. Thread-safe:
// guarded by the code-range mutex.
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  DCHECK(allocation_list_.empty() ||
         current_allocation_block_index_ < allocation_list_.size());
  if (allocation_list_.empty() ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = ::RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  // Shrink the current allocation block by the carved-out prefix.
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}
262 :
263 :
264 0 : void CodeRange::ReleaseBlock(const FreeBlock* block) {
265 0 : base::LockGuard<base::Mutex> guard(&code_range_mutex_);
266 0 : free_list_.push_back(*block);
267 0 : }
268 :
269 :
270 : // -----------------------------------------------------------------------------
271 : // MemoryAllocator
272 : //
273 :
// Creates a memory allocator for |isolate|. Capacity and the code range are
// configured separately via SetUp(). The lowest/highest-ever-allocated
// markers start at the extreme values so the first allocation narrows them.
MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      code_range_(nullptr),
      capacity_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)),
      unmapper_(isolate->heap(), this) {}
283 :
// Initializes the allocator with a page-aligned |capacity| and sets up the
// code range with |code_range_size| bytes. Returns false if the code range
// reservation fails (the CodeRange object is still owned and freed in
// TearDown()).
bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
  capacity_ = ::RoundUp(capacity, Page::kPageSize);

  size_ = 0;
  size_executable_ = 0;

  code_range_ = new CodeRange(isolate_);
  if (!code_range_->SetUp(code_range_size)) return false;

  return true;
}
295 :
296 :
// Tears down the allocator. All spaces must already have been torn down so
// that no chunk memory is outstanding.
void MemoryAllocator::TearDown() {
  unmapper()->TearDown();

  // Check that spaces were torn down before MemoryAllocator.
  DCHECK_EQ(size_.Value(), 0u);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK_EQ(0, size_executable_);
  capacity_ = 0;

  if (last_chunk_.IsReserved()) {
    last_chunk_.Release();
  }

  delete code_range_;
  code_range_ = nullptr;
}
313 :
// Background task that drains the unmapper's chunk queues and signals the
// pending-tasks semaphore on completion so WaitUntilCompleted() can
// synchronize with it.
class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
 public:
  explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
      : CancelableTask(isolate), unmapper_(unmapper) {}

 private:
  // Frees all queued chunks (uncommitting pooled ones), then signals done.
  void RunInternal() override {
    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
  }

  Unmapper* const unmapper_;
  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
328 :
// Frees the queued chunks, either on a background task (when tasks are
// enabled and concurrent sweeping is on) or synchronously on this thread.
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
  ReconsiderDelayedChunks();
  if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
    if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
      // kMaxUnmapperTasks are already running. Avoid creating any more.
      return;
    }
    UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
    DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
    // Record the task id so WaitUntilCompleted() can abort or await it.
    task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        task, v8::Platform::kShortRunningTask);
  } else {
    PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
  }
}
345 :
346 110176 : void MemoryAllocator::Unmapper::WaitUntilCompleted() {
347 165919 : for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
348 111486 : if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
349 : CancelableTaskManager::kTaskAborted) {
350 54510 : pending_unmapping_tasks_semaphore_.Wait();
351 : }
352 55743 : concurrent_unmapping_tasks_active_ = 0;
353 : }
354 110176 : }
355 :
// Drains the chunk queues: regular chunks are freed (and re-queued as pooled
// when flagged POOLED), the pooled queue is fully released when |mode| is
// kReleasePooled, and non-regular chunks are freed last.
template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
  MemoryChunk* chunk = nullptr;
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
  }
  if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
    // The previous loop uncommitted any pages marked as pooled and added them
    // to the pooled list. In case of kReleasePooled we need to free them
    // though.
    while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
      allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
    }
  }
  // Non-regular chunks.
  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
  }
}
// Tears down the unmapper: requires that no task is still running, then
// frees all queued chunks including the pooled ones.
void MemoryAllocator::Unmapper::TearDown() {
  CHECK_EQ(0, concurrent_unmapping_tasks_active_);
  ReconsiderDelayedChunks();
  CHECK(delayed_regular_chunks_.empty());
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    DCHECK(chunks_[i].empty());
  }
}
388 :
// Re-queues chunks whose freeing was previously delayed (see
// CanFreeMemoryChunk) back onto the regular queue.
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
  // Move constructed, so the permanent list should be empty.
  DCHECK(delayed_regular_chunks_.empty());
  for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
    AddMemoryChunkSafe<kRegular>(*it);
  }
}
397 :
398 0 : int MemoryAllocator::Unmapper::NumberOfChunks() {
399 0 : base::LockGuard<base::Mutex> guard(&mutex_);
400 : size_t result = 0;
401 0 : for (int i = 0; i < kNumberOfChunkQueues; i++) {
402 0 : result += chunks_[i].size();
403 : }
404 0 : return static_cast<int>(result);
405 : }
406 :
// Returns whether |chunk| may be freed immediately. New-space chunks cannot
// be freed while sweeping is in progress, because a sweeper task may still
// hold them in its work queue.
bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
  MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
  // We cannot free a memory chunk in new space while the sweeper is running
  // because the memory chunk can be in the queue of a sweeper task.
  // Chunks in old generation are unmapped if they are empty.
  DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
  return !chunk->InNewSpace() || mc == nullptr ||
         !mc->sweeper().sweeping_in_progress();
}
416 :
417 55285 : bool MemoryAllocator::CommitMemory(Address base, size_t size,
418 : Executability executable) {
419 55285 : if (!base::OS::CommitRegion(base, size, executable == EXECUTABLE)) {
420 : return false;
421 : }
422 55285 : UpdateAllocatedSpaceLimits(base, base + size);
423 55285 : return true;
424 : }
425 :
// Releases a whole reservation back to the OS. Must not be used for memory
// inside the code range, which has no per-chunk VirtualMemory.
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(code_range() == nullptr ||
         !code_range()->contains(static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
         reservation->size() <= Page::kPageSize);

  reservation->Release();
}
437 :
438 :
// Frees [base, base+size): memory inside the code range is returned to the
// code range's free list; everything else is released to the OS directly.
void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  if (code_range() != nullptr &&
      code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
    bool result = base::OS::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}
453 :
// Reserves |size| bytes aligned to |alignment| near |hint|. Any tail of the
// reservation beyond the aligned range is released again. On success the
// reservation is transferred to |controller| and the aligned base returned;
// nullptr on failure.
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              void* hint,
                                              VirtualMemory* controller) {
  VirtualMemory reservation;
  if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
    return nullptr;

  const Address base =
      ::RoundUp(static_cast<Address>(reservation.address()), alignment);
  if (base + size != reservation.end()) {
    // Give back the unused tail of the reservation.
    const Address unused_start = ::RoundUp(base + size, GetCommitPageSize());
    reservation.ReleasePartial(unused_start);
  }
  size_.Increment(reservation.size());
  controller->TakeControl(&reservation);
  return base;
}
471 :
// Reserves |reserve_size| bytes aligned to |alignment| and commits the first
// |commit_size| bytes (via the executable path, with guard pages, when
// |executable| is EXECUTABLE). On success the reservation is transferred to
// |controller|; on failure everything is released and nullptr returned.
Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, void* hint, VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  VirtualMemory reservation;
  Address base =
      ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
  if (base == nullptr) return nullptr;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = nullptr;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = nullptr;
    }
  }

  if (base == nullptr) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    size_.Decrement(reserve_size);
    return nullptr;
  }

  controller->TakeControl(&reservation);
  return base;
}
505 :
// Turns this page into the anchor of |space|'s circular page list: it links
// to itself and carries only the ANCHOR flag.
void Page::InitializeAsAnchor(Space* space) {
  set_owner(space);
  set_next_chunk(this);
  set_prev_chunk(this);
  SetFlags(0, static_cast<uintptr_t>(~0));
  SetFlag(ANCHOR);
}
513 :
// Loads heap_ with acquire semantics, pairing with the release publication in
// InitializationMemoryFence() for readers on other threads.
Heap* MemoryChunk::synchronized_heap() {
  return reinterpret_cast<Heap*>(
      base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
}
518 :
// Publishes a fully initialized chunk to other threads via a full fence.
void MemoryChunk::InitializationMemoryFence() {
  base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
  // Since TSAN does not process memory fences, we use the following annotation
  // to tell TSAN that there is no data race when emitting a
  // InitializationMemoryFence. Note that the other thread still needs to
  // perform MemoryChunk::synchronized_heap().
  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
                      reinterpret_cast<base::AtomicWord>(heap_));
#endif
}
530 :
// Re-protects the page as read+execute once the last writer scope closes;
// nested scopes only decrement the counter.
void MemoryChunk::SetReadAndExecutable() {
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
  // Decrementing the write_unprotect_counter_ and changing the page
  // protection mode has to be atomic.
  base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
  if (write_unprotect_counter_ == 0) {
    // This is a corner case that may happen when we have a
    // CodeSpaceMemoryModificationScope open and this page was newly
    // added.
    return;
  }
  write_unprotect_counter_--;
  DCHECK_LE(write_unprotect_counter_, 1);
  if (write_unprotect_counter_ == 0) {
    // Protect everything past the chunk header; the header itself stays
    // writable.
    Address protect_start =
        address() + MemoryAllocator::CodePageAreaStartOffset();
    size_t protect_size = size() - MemoryAllocator::CodePageAreaStartOffset();
    DCHECK(
        IsAddressAligned(protect_start, MemoryAllocator::GetCommitPageSize()));
    base::OS::SetReadAndExecutable(protect_start, protect_size);
  }
}
554 :
// Makes the page writable for code modification. Only the first (outermost)
// writer scope actually changes protection; nested scopes bump the counter.
void MemoryChunk::SetReadAndWritable() {
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
  // Incrementing the write_unprotect_counter_ and changing the page
  // protection mode has to be atomic.
  base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
  write_unprotect_counter_++;
  DCHECK_LE(write_unprotect_counter_, 2);
  if (write_unprotect_counter_ == 1) {
    Address unprotect_start =
        address() + MemoryAllocator::CodePageAreaStartOffset();
    size_t unprotect_size = size() - MemoryAllocator::CodePageAreaStartOffset();
    DCHECK(IsAddressAligned(unprotect_start,
                            MemoryAllocator::GetCommitPageSize()));
    base::OS::SetReadAndWritable(unprotect_start, unprotect_size, false);
  }
}
572 :
// Initializes the raw memory at |base| as a MemoryChunk of |size| bytes with
// usable area [area_start, area_end), owned by |owner|. Takes over
// |reservation| if one is provided. Returns the initialized chunk.
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     VirtualMemory* reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = Flags(NO_FLAGS);
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  // Remembered-set slot sets start out empty; release stores publish them
  // for concurrent readers.
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
                                       nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                       nullptr);
  chunk->invalidated_slots_ = nullptr;
  chunk->skip_list_ = nullptr;
  chunk->progress_bar_ = 0;
  // The high-water mark is stored relative to the chunk start.
  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
  chunk->page_protection_change_mutex_ = new base::Mutex();
  chunk->write_unprotect_counter_ = 0;
  chunk->mutex_ = new base::RecursiveMutex();
  chunk->allocated_bytes_ = chunk->area_size();
  chunk->wasted_memory_ = 0;
  chunk->young_generation_bitmap_ = nullptr;
  chunk->set_next_chunk(nullptr);
  chunk->set_prev_chunk(nullptr);
  chunk->local_tracker_ = nullptr;

  heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);

  DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (reservation != nullptr) {
    chunk->reservation_.TakeControl(reservation);
  }
  return chunk;
}
622 :
// Initializes |chunk| as a page of this paged space: sets up free-list
// categories, resets the allocated-bytes counter, and applies the current
// incremental-marking page flags.
Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
  Page* page = static_cast<Page*>(chunk);
  DCHECK_GE(Page::kAllocatableMemory, page->area_size());
  // Make sure that categories are initialized before freeing the area.
  page->InitializeFreeListCategories();
  page->ResetAllocatedBytes();
  heap()->incremental_marking()->SetOldSpacePageFlags(page);
  page->InitializationMemoryFence();
  return page;
}
633 :
// Initializes |chunk| as a new-space page of this semispace, tagging it as
// to-space or from-space and allocating its auxiliary data structures.
Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  Page* page = static_cast<Page*>(chunk);
  heap()->incremental_marking()->SetNewSpacePageFlags(page);
  page->AllocateLocalTracker();
  if (FLAG_minor_mc) {
    // Minor mark-compact keeps its own liveness bitmap per page.
    page->AllocateYoungGenerationBitmap();
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
  }
  page->InitializationMemoryFence();
  return page;
}
654 :
// Initializes |chunk| as a large page. Executable chunks larger than
// kMaxCodePageSize are fatal (typed slot offsets would overflow).
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, Space* owner) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());

  // Initialize the owner field for each contained page (except the first, which
  // is initialized by MemoryChunk::Initialize).
  for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
       addr < chunk->area_end(); addr += Page::kPageSize) {
    // Clear out kPageHeaderTag.
    Memory::Address_at(addr) = 0;
  }
  LargePage* page = static_cast<LargePage*>(chunk);
  page->InitializationMemoryFence();
  return page;
}
676 :
// Promotes a whole new-space page into old space: clears its flags,
// re-initializes it as an old-space page, and registers it with old space.
Page* Page::ConvertNewToOld(Page* old_page) {
  DCHECK(!old_page->is_anchor());
  DCHECK(old_page->InNewSpace());
  OldSpace* old_space = old_page->heap()->old_space();
  old_page->set_owner(old_space);
  // Drop all new-space flags before re-initializing as an old-space page.
  old_page->SetFlags(0, static_cast<uintptr_t>(~0));
  Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
  old_space->AddPage(new_page);
  return new_page;
}
687 :
// Commit MemoryChunk area to the requested size: grows or shrinks the
// committed region so it covers |requested| bytes of usable area, using the
// chunk's own reservation or, for executable chunks without one, the shared
// code range. Returns false if the OS operation fails.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      ::RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
  size_t committed_size = ::RoundUp(header_size + (area_end() - area_start()),
                                    MemoryAllocator::GetCommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less or equal than the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->memory_allocator()->CommitMemory(start, length,
                                                    executable)) {
        return false;
      }
    } else {
      // No per-chunk reservation: the memory lives in the shared code range.
      CodeRange* code_range = heap_->memory_allocator()->code_range();
      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK_LT(0, commit_size);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->memory_allocator()->code_range();
      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}
737 :
738 8133 : size_t MemoryChunk::CommittedPhysicalMemory() {
739 8206 : if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
740 4030 : return size();
741 73 : return high_water_mark_.Value();
742 : }
743 :
744 697357 : void MemoryChunk::InsertAfter(MemoryChunk* other) {
745 : MemoryChunk* other_next = other->next_chunk();
746 :
747 : set_next_chunk(other_next);
748 : set_prev_chunk(other);
749 : other_next->set_prev_chunk(this);
750 : other->set_next_chunk(this);
751 697357 : }
752 :
753 :
754 131490 : void MemoryChunk::Unlink() {
755 : MemoryChunk* next_element = next_chunk();
756 : MemoryChunk* prev_element = prev_chunk();
757 : next_element->set_prev_chunk(prev_element);
758 : prev_element->set_next_chunk(next_element);
759 : set_prev_chunk(nullptr);
760 : set_next_chunk(nullptr);
761 131490 : }
762 :
// Allocates a MemoryChunk with |reserve_area_size| reserved usable bytes, of
// which |commit_area_size| are committed, on behalf of |owner|. Executable
// chunks are surrounded by guard pages (see layout below) and come from the
// code range when one is available. Returns nullptr on allocation failure.
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                            size_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK_LE(commit_area_size, reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = nullptr;
  VirtualMemory reservation;
  Address area_start = nullptr;
  Address area_end = nullptr;
  void* address_hint = heap->GetRandomMmapAddr();

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = ::RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                           GetCommitPageSize()) +
                 CodePageGuardSize();

    // Size of header (not executable) plus area (executable).
    size_t commit_size = ::RoundUp(
        CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use code range only for large object space on mips64 to keep address
    // range within 256-MB memory region.
    if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
#else
    if (code_range()->valid()) {
#endif
      // The code range hands out pre-reserved, pre-aligned memory; no local
      // VirtualMemory reservation is created in this branch.
      base =
          code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == nullptr) return nullptr;
      size_.Increment(chunk_size);
      // Update executable memory size.
      size_executable_.Increment(chunk_size);
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   address_hint, &reservation);
      if (base == nullptr) return nullptr;
      // Update executable memory size.
      size_executable_.Increment(reservation.size());
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = ::RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                           GetCommitPageSize());
    size_t commit_size =
        ::RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                  GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);

    if (base == nullptr) return nullptr;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));

  // We cannot use the last chunk in the address space because we would
  // overflow when comparing top and limit if this chunk is used for a
  // linear allocation area.
  if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
    CHECK(!last_chunk_.IsReserved());
    last_chunk_.TakeControl(&reservation);
    UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
                  last_chunk_.size());
    size_.Decrement(chunk_size);
    if (executable == EXECUTABLE) {
      size_executable_.Decrement(chunk_size);
    }
    CHECK(last_chunk_.IsReserved());
    // Retry once; the retry cannot hit this case again because the offending
    // address-space tail is now parked in |last_chunk_|.
    return AllocateChunk(reserve_area_size, commit_area_size, executable,
                         owner);
  }

  return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                 executable, owner, &reservation);
}
895 :
896 1239763 : void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
897 :
// Forgets the bytes recorded as wasted on this page by the free list.
void Page::ResetFreeListStatistics() {
  wasted_memory_ = 0;
}
901 :
902 0 : size_t Page::AvailableInFreeList() {
903 0 : size_t sum = 0;
904 0 : ForAllFreeListCategories([&sum](FreeListCategory* category) {
905 0 : sum += category->available();
906 : });
907 0 : return sum;
908 : }
909 :
// Returns the committed tail of the page behind the high water mark to the
// OS (in whole commit pages) and returns the number of bytes released.
// Returns 0 when the page cannot be shrunk.
size_t Page::ShrinkToHighWaterMark() {
  // Shrinking only makes sense outside of the CodeRange, where we don't care
  // about address space fragmentation.
  VirtualMemory* reservation = reserved_memory();
  if (!reservation->IsReserved()) return 0;

  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
  if (filler->address() == area_end()) return 0;
  CHECK(filler->IsFiller());
  if (!filler->IsFreeSpace()) return 0;

#ifdef DEBUG
  // Check that the filler is indeed the last filler on the page.
  HeapObjectIterator it(this);
  HeapObject* filler2 = nullptr;
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
  }
  if (filler2 == nullptr || filler2->address() == area_end()) return 0;
  DCHECK(filler2->IsFiller());
  // The deserializer might leave behind fillers. In this case we need to
  // iterate even further.
  while ((filler2->address() + filler2->Size()) != area_end()) {
    filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
    DCHECK(filler2->IsFiller());
  }
  DCHECK_EQ(filler->address(), filler2->address());
#endif  // DEBUG

  // Keep FreeSpace::kSize bytes so a (shrunken) filler object survives; only
  // whole commit pages can be released.
  size_t unused = RoundDown(
      static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
      MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    // Replace the old filler with one covering only the surviving area...
    heap()->CreateFillerObjectAt(
        filler->address(),
        static_cast<int>(area_end() - filler->address() - unused),
        ClearRecordedSlots::kNo);
    // ...then hand the trailing |unused| bytes back to the OS.
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    CHECK(filler->IsFiller());
    CHECK_EQ(filler->address() + filler->Size(), area_end());
  }
  return unused;
}
963 :
964 147750 : void Page::CreateBlackArea(Address start, Address end) {
965 : DCHECK(heap()->incremental_marking()->black_allocation());
966 : DCHECK_EQ(Page::FromAddress(start), this);
967 : DCHECK_NE(start, end);
968 : DCHECK_EQ(Page::FromAddress(end - 1), this);
969 : IncrementalMarking::MarkingState* marking_state =
970 : heap()->incremental_marking()->marking_state();
971 : marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
972 295500 : AddressToMarkbitIndex(end));
973 147750 : marking_state->IncrementLiveBytes(this, static_cast<int>(end - start));
974 147750 : }
975 :
976 4628 : void Page::DestroyBlackArea(Address start, Address end) {
977 : DCHECK(heap()->incremental_marking()->black_allocation());
978 : DCHECK_EQ(Page::FromAddress(start), this);
979 : DCHECK_NE(start, end);
980 : DCHECK_EQ(Page::FromAddress(end - 1), this);
981 : IncrementalMarking::MarkingState* marking_state =
982 : heap()->incremental_marking()->marking_state();
983 : marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
984 9256 : AddressToMarkbitIndex(end));
985 4628 : marking_state->IncrementLiveBytes(this, -static_cast<int>(end - start));
986 4628 : }
987 :
// Releases the tail [start_free, ...) of |chunk| back to the OS, shrinking
// the chunk by |bytes_to_free| and setting its usable area end to
// |new_area_end|. For executable chunks the trailing guard page is
// re-established at the new area end first.
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                                        size_t bytes_to_free,
                                        Address new_area_end) {
  VirtualMemory* reservation = chunk->reserved_memory();
  DCHECK(reservation->IsReserved());
  chunk->size_ -= bytes_to_free;
  chunk->area_end_ = new_area_end;
  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    DCHECK_EQ(0, reinterpret_cast<uintptr_t>(chunk->area_end_) %
                     static_cast<uintptr_t>(GetCommitPageSize()));
    DCHECK_EQ(chunk->address() + chunk->size(),
              chunk->area_end() + CodePageGuardSize());
    reservation->Guard(chunk->area_end_);
  }
  // On e.g. Windows, a reservation may be larger than a page and releasing
  // partially starting at |start_free| will also release the potentially
  // unused part behind the current page.
  const size_t released_bytes = reservation->ReleasePartial(start_free);
  DCHECK_GE(size_.Value(), released_bytes);
  size_.Decrement(released_bytes);
  isolate_->counters()->memory_allocated()->Decrement(
      static_cast<int>(released_bytes));
}
1011 :
// First phase of freeing a chunk: logs the deletion, unaccounts its memory
// from the allocator's counters, and flags it PRE_FREED. The chunk stays
// valid until PerformFreeMemory() runs.
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  // Account the full reservation when one exists; otherwise fall back to the
  // chunk's own size.
  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_.Value(), static_cast<size_t>(size));
  size_.Decrement(size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  if (chunk->executable() == EXECUTABLE) {
    DCHECK_GE(size_executable_.Value(), size);
    size_executable_.Decrement(size);
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);
}
1032 :
1033 :
// Second phase of freeing a chunk (requires PreFreeMemory to have run):
// releases the chunk's side data structures, then either uncommits the
// memory (POOLED chunks, which stay reserved for reuse) or frees it
// entirely.
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
  } else {
    if (reservation->IsReserved()) {
      FreeMemory(reservation, chunk->executable());
    } else {
      // Chunks from the code range have no local reservation.
      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
    }
  }
}
1049 :
// Frees |chunk| according to the compile-time |mode|:
//  - kFull: synchronously pre-free and release the memory.
//  - kAlreadyPooled: memory was already uncommitted; only release the
//    page-sized region itself.
//  - kPooledAndQueue: flag the page-sized chunk POOLED, then queue it.
//  - kPreFreeAndQueue: pre-free and hand the chunk to the unmapper.
template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  switch (mode) {
    case kFull:
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kAlreadyPooled:
      // Pooled pages cannot be touched anymore as their memory is uncommitted.
      FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
                 Executability::NOT_EXECUTABLE);
      break;
    case kPooledAndQueue:
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
      // Fall through to kPreFreeAndQueue.
    case kPreFreeAndQueue:
      PreFreeMemory(chunk);
      // The chunks added to this queue will be freed by a concurrent thread.
      unmapper()->AddMemoryChunkSafe(chunk);
      break;
  }
}

template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
    MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
    MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
    MemoryChunk* chunk);
1085 :
// Allocates a page for |owner|. With kPooled it first tries to reuse a
// pooled chunk from the unmapper, falling back to a fresh chunk allocation.
// Returns nullptr if no memory could be obtained.
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                    Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (alloc_mode == kPooled) {
    // Pooled chunks are always regular-sized and non-executable.
    DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  return owner->InitializePage(chunk, executable);
}

template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    size_t size, PagedSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    size_t size, SemiSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    size_t size, SemiSpace* owner, Executability executable);
1111 :
1112 17508 : LargePage* MemoryAllocator::AllocateLargePage(size_t size,
1113 : LargeObjectSpace* owner,
1114 : Executability executable) {
1115 17508 : MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
1116 17508 : if (chunk == nullptr) return nullptr;
1117 17508 : return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
1118 : }
1119 :
// Tries to reuse an uncommitted page-sized chunk from the unmapper's pool.
// On success the chunk is re-committed and re-initialized for |owner|.
// Returns nullptr when the pool is empty or committing fails.
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return nullptr;
  const int size = MemoryChunk::kPageSize;
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start = start + MemoryChunk::kObjectStartOffset;
  const Address area_end = start + size;
  if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
    return nullptr;
  }
  VirtualMemory reservation(start, size);
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, &reservation);
  size_.Increment(size);
  return chunk;
}
1137 :
1138 17368 : bool MemoryAllocator::CommitBlock(Address start, size_t size,
1139 : Executability executable) {
1140 17368 : if (!CommitMemory(start, size, executable)) return false;
1141 :
1142 : if (Heap::ShouldZapGarbage()) {
1143 : ZapBlock(start, size);
1144 : }
1145 :
1146 34736 : isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
1147 17368 : return true;
1148 : }
1149 :
1150 :
1151 180535 : bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
1152 180535 : if (!base::OS::UncommitRegion(start, size)) return false;
1153 361072 : isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
1154 180536 : return true;
1155 : }
1156 :
1157 :
1158 0 : void MemoryAllocator::ZapBlock(Address start, size_t size) {
1159 0 : for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
1160 0 : Memory::Address_at(start + s) = kZapValue;
1161 : }
1162 0 : }
1163 :
#ifdef DEBUG
// Debug-only: prints total capacity, currently used bytes, and the unused
// fraction as a percentage.
void MemoryAllocator::ReportStatistics() {
  size_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF("  capacity: %zu , used: %" PRIuS ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif
1172 :
// Offset of the leading guard page within an executable chunk: right after
// the chunk header, rounded up to a whole OS commit page.
size_t MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return ::RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
}
1178 :
1179 1962 : size_t MemoryAllocator::CodePageGuardSize() {
1180 2501408 : return static_cast<int>(GetCommitPageSize());
1181 : }
1182 :
// Start of the executable area within a code chunk: header plus leading
// guard page.
size_t MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}
1188 :
1189 81617 : size_t MemoryAllocator::CodePageAreaEndOffset() {
1190 : // We are guarding code pages: the last OS page will be protected as
1191 : // non-writable.
1192 293146 : return Page::kPageSize - static_cast<int>(GetCommitPageSize());
1193 : }
1194 :
1195 368426 : intptr_t MemoryAllocator::GetCommitPageSize() {
1196 7988744 : if (FLAG_v8_os_page_size != 0) {
1197 : DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
1198 0 : return FLAG_v8_os_page_size * KB;
1199 : } else {
1200 7988744 : return base::OS::CommitPageSize();
1201 : }
1202 : }
1203 :
// Commits the pieces of a reserved executable chunk in order: non-executable
// header, guard page, executable body, trailing guard page. On any failure
// the already-committed pieces are uncommitted again and false is returned.
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        // Trailing guard failed: roll back the body commit.
        vm->Uncommit(body, body_size);
      }
    }
    // Guard or body failed: roll back the header commit.
    vm->Uncommit(header, header_size);
  }
  return false;
}
1231 :
1232 :
1233 : // -----------------------------------------------------------------------------
1234 : // MemoryChunk implementation
1235 :
1236 14966 : bool MemoryChunk::contains_array_buffers() {
1237 29679 : return local_tracker() != nullptr && !local_tracker()->IsEmpty();
1238 : }
1239 :
// Releases every side data structure owned by the chunk: skip list, mutexes,
// all remembered sets, invalidated slots, the array-buffer tracker, and the
// young-generation bitmap. Called while the chunk itself is being freed.
void MemoryChunk::ReleaseAllocatedMemory() {
  if (skip_list_ != nullptr) {
    delete skip_list_;
    skip_list_ = nullptr;
  }
  if (mutex_ != nullptr) {
    delete mutex_;
    mutex_ = nullptr;
  }
  if (page_protection_change_mutex_ != nullptr) {
    delete page_protection_change_mutex_;
    page_protection_change_mutex_ = nullptr;
  }
  ReleaseSlotSet<OLD_TO_NEW>();
  ReleaseSlotSet<OLD_TO_OLD>();
  ReleaseTypedSlotSet<OLD_TO_NEW>();
  ReleaseTypedSlotSet<OLD_TO_OLD>();
  ReleaseInvalidatedSlots();
  if (local_tracker_ != nullptr) ReleaseLocalTracker();
  if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
}
1261 :
1262 84882 : static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
1263 84882 : size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
1264 : DCHECK_LT(0, pages);
1265 84882 : SlotSet* slot_set = new SlotSet[pages];
1266 171271 : for (size_t i = 0; i < pages; i++) {
1267 86384 : slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
1268 : }
1269 84887 : return slot_set;
1270 : }
1271 :
template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();

// Lazily allocates the slot set of the given remembered-set type. Safe
// against concurrent allocators: the new set is published with a
// compare-and-swap, and the loser deletes its own copy and adopts the
// winner's.
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
  SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
  SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
      &slot_set_[type], nullptr, slot_set);
  if (old_slot_set != nullptr) {
    // Another thread won the race; discard ours and use theirs.
    delete[] slot_set;
    slot_set = old_slot_set;
  }
  DCHECK(slot_set);
  return slot_set;
}
1288 : template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
1289 : template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
1290 :
1291 : template <RememberedSetType type>
1292 1489293 : void MemoryChunk::ReleaseSlotSet() {
1293 1489293 : SlotSet* slot_set = slot_set_[type];
1294 1489293 : if (slot_set) {
1295 82448 : slot_set_[type] = nullptr;
1296 82448 : delete[] slot_set;
1297 : }
1298 1489295 : }
1299 :
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();

// Lazily allocates the typed slot set of the given remembered-set type.
// Race-safe via compare-and-swap publication, mirroring AllocateSlotSet.
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
  TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
  TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
      &typed_slot_set_[type], nullptr, typed_slot_set);
  if (old_value != nullptr) {
    // Another thread won the race; discard ours and use theirs.
    delete typed_slot_set;
    typed_slot_set = old_value;
  }
  DCHECK(typed_slot_set);
  return typed_slot_set;
}
1316 : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
1317 : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
1318 :
1319 : template <RememberedSetType type>
1320 1485554 : void MemoryChunk::ReleaseTypedSlotSet() {
1321 1485554 : TypedSlotSet* typed_slot_set = typed_slot_set_[type];
1322 1485554 : if (typed_slot_set) {
1323 8786 : typed_slot_set_[type] = nullptr;
1324 8786 : delete typed_slot_set;
1325 : }
1326 1485554 : }
1327 :
// Lazily creates the map of objects whose recorded slots were invalidated.
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
  DCHECK_NULL(invalidated_slots_);
  invalidated_slots_ = new InvalidatedSlots();
  return invalidated_slots_;
}
1333 :
1334 741678 : void MemoryChunk::ReleaseInvalidatedSlots() {
1335 741678 : if (invalidated_slots_) {
1336 449 : delete invalidated_slots_;
1337 225 : invalidated_slots_ = nullptr;
1338 : }
1339 741679 : }
1340 :
1341 56777 : void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
1342 134064 : int size) {
1343 56777 : if (!ShouldSkipEvacuationSlotRecording()) {
1344 44688 : if (invalidated_slots() == nullptr) {
1345 225 : AllocateInvalidatedSlots();
1346 : }
1347 44688 : int old_size = (*invalidated_slots())[object];
1348 89376 : (*invalidated_slots())[object] = std::max(old_size, size);
1349 : }
1350 56777 : }
1351 :
// Lazily creates the per-page array-buffer tracker.
void MemoryChunk::AllocateLocalTracker() {
  DCHECK_NOT_NULL(heap());
  DCHECK_NULL(local_tracker_);
  local_tracker_ = new LocalArrayBufferTracker(heap());
}
1356 :
// Destroys the per-page array-buffer tracker; must exist when called.
void MemoryChunk::ReleaseLocalTracker() {
  DCHECK_NOT_NULL(local_tracker_);
  delete local_tracker_;
  local_tracker_ = nullptr;
}
1362 :
// Lazily allocates a zero-initialized young-generation marking bitmap.
void MemoryChunk::AllocateYoungGenerationBitmap() {
  DCHECK_NULL(young_generation_bitmap_);
  young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
}
1367 :
// Frees the young-generation marking bitmap; must exist when called.
// Allocated with calloc above, hence released with free.
void MemoryChunk::ReleaseYoungGenerationBitmap() {
  DCHECK_NOT_NULL(young_generation_bitmap_);
  free(young_generation_bitmap_);
  young_generation_bitmap_ = nullptr;
}
1373 :
1374 : // -----------------------------------------------------------------------------
1375 : // PagedSpace implementation
1376 :
1377 : STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
1378 : ObjectSpace::kObjectSpaceNewSpace);
1379 : STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
1380 : ObjectSpace::kObjectSpaceOldSpace);
1381 : STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
1382 : ObjectSpace::kObjectSpaceCodeSpace);
1383 : STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
1384 : ObjectSpace::kObjectSpaceMapSpace);
1385 :
// Registers |observer| and recomputes the next inline allocation step so
// the new observer's step size takes effect immediately.
void Space::AddAllocationObserver(AllocationObserver* observer) {
  allocation_observers_.push_back(observer);
  StartNextInlineAllocationStep();
}
1390 :
1391 147993 : void Space::RemoveAllocationObserver(AllocationObserver* observer) {
1392 : auto it = std::find(allocation_observers_.begin(),
1393 147993 : allocation_observers_.end(), observer);
1394 : DCHECK(allocation_observers_.end() != it);
1395 147993 : allocation_observers_.erase(it);
1396 147993 : StartNextInlineAllocationStep();
1397 147993 : }
1398 :
// Temporarily suppresses AllocationStep notifications (see AllocationStep).
void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
1400 :
// Re-enables AllocationStep notifications paused by
// PauseAllocationObservers().
void Space::ResumeAllocationObservers() {
  allocation_observers_paused_ = false;
}
1404 :
1405 153816089 : void Space::AllocationStep(int bytes_since_last, Address soon_object,
1406 153671069 : int size) {
1407 153816089 : if (!allocation_observers_paused_) {
1408 153671069 : heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
1409 329205029 : for (AllocationObserver* observer : allocation_observers_) {
1410 21862954 : observer->AllocationStep(bytes_since_last, soon_object, size);
1411 : }
1412 : }
1413 153816058 : }
1414 :
1415 0 : intptr_t Space::GetNextInlineAllocationStepSize() {
1416 : intptr_t next_step = 0;
1417 89397139 : for (AllocationObserver* observer : allocation_observers_) {
1418 : next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
1419 22387066 : : observer->bytes_to_next_step();
1420 : }
1421 : DCHECK(allocation_observers_.size() == 0 || next_step != 0);
1422 0 : return next_step;
1423 : }
1424 :
// Constructs an (initially empty) paged space: no pages, cleared accounting
// stats, and no linear allocation area.
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable),
      anchor_(this),
      free_list_(this),
      locked_page_(nullptr),
      top_on_previous_step_(0) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  accounting_stats_.Clear();

  allocation_info_.Reset(nullptr, nullptr);
}
1437 :
1438 :
// Nothing to set up: pages are allocated lazily when the space expands.
bool PagedSpace::SetUp() { return true; }
1440 :
1441 :
// Paged spaces are usable right after construction.
bool PagedSpace::HasBeenSetUp() { return true; }
1443 :
1444 :
// Frees every page of the space (including their tracked array buffers),
// resets the page list to just the anchor, and clears the accounting stats.
void PagedSpace::TearDown() {
  for (auto it = begin(); it != end();) {
    Page* page = *(it++);  // Will be erased.
    ArrayBufferTracker::FreeAll(page);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}
1455 :
// Pulls pages that the sweeper has finished back into this space's free
// list. Compaction (local) spaces additionally steal swept pages from the
// main space and stop once enough memory has been gathered.
void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE) {
    return;
  }
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  size_t added = 0;
  {
    Page* p = nullptr;
    while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
      // Only during compaction pages can actually change ownership. This is
      // safe because there exists no other competing action on the page links
      // during compaction.
      if (is_local()) {
        DCHECK_NE(this, p->owner());
        PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
        // Lock the previous owner before moving the page over.
        base::LockGuard<base::Mutex> guard(owner->mutex());
        owner->RefineAllocatedBytesAfterSweeping(p);
        owner->RemovePage(p);
        added += AddPage(p);
      } else {
        base::LockGuard<base::Mutex> guard(mutex());
        DCHECK_EQ(this, p->owner());
        RefineAllocatedBytesAfterSweeping(p);
        added += RelinkFreeListCategories(p);
      }
      added += p->wasted_memory();
      // Compaction spaces only take as much as they need.
      if (is_local() && (added > kCompactionMemoryWanted)) break;
    }
  }
}
1489 :
// Moves all pages of the compaction space |other| (which must have the same
// identity) into this space; |other| is empty afterwards.
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  base::LockGuard<base::Mutex> guard(mutex());

  DCHECK(identity() == other->identity());
  // Unmerged fields:
  //   area_size_
  //   anchor_

  other->EmptyAllocationInfo();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK_NULL(other->top());
  DCHECK_NULL(other->limit());

  // Move over pages.
  for (auto it = other->begin(); it != other->end();) {
    Page* p = *(it++);
    // Relinking requires the category to be unlinked.
    other->RemovePage(p);
    AddPage(p);
    DCHECK_EQ(p->AvailableInFreeList(),
              p->AvailableInFreeListFromAllocatedBytes());
  }
  DCHECK_EQ(0u, other->Size());
  DCHECK_EQ(0u, other->Capacity());
}
1516 :
1517 :
1518 18 : size_t PagedSpace::CommittedPhysicalMemory() {
1519 18 : if (!base::OS::HasLazyCommits()) return CommittedMemory();
1520 18 : MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1521 : size_t size = 0;
1522 154 : for (Page* page : *this) {
1523 59 : size += page->CommittedPhysicalMemory();
1524 : }
1525 18 : return size;
1526 : }
1527 :
1528 10 : bool PagedSpace::ContainsSlow(Address addr) {
1529 : Page* p = Page::FromAddress(addr);
1530 230 : for (Page* page : *this) {
1531 110 : if (page == p) return true;
1532 : }
1533 5 : return false;
1534 : }
1535 :
// After sweeping, replaces the page's (over-approximated) live-bytes-based
// allocation accounting with the exact allocated_bytes() count and resets
// the marking live bytes.
void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
  CHECK(page->SweepingDone());
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // The live_byte on the page was accounted in the space allocated
  // bytes counter. After sweeping allocated_bytes() contains the
  // accurate live byte count on the page.
  size_t old_counter = marking_state->live_bytes(page);
  size_t new_counter = page->allocated_bytes();
  DCHECK_GE(old_counter, new_counter);
  if (old_counter > new_counter) {
    DecreaseAllocatedBytes(old_counter - new_counter, page);
    // Give the heap a chance to adjust counters in response to the
    // more precise and smaller old generation size.
    heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
  }
  marking_state->SetLiveBytes(page, 0);
}
1554 :
1555 27346 : Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
1556 27346 : base::LockGuard<base::Mutex> guard(mutex());
1557 : // Check for pages that still contain free list entries. Bail out for smaller
1558 : // categories.
1559 : const int minimum_category =
1560 54726 : static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
1561 : Page* page = free_list()->GetPageForCategoryType(kHuge);
1562 27363 : if (!page && static_cast<int>(kLarge) >= minimum_category)
1563 : page = free_list()->GetPageForCategoryType(kLarge);
1564 27363 : if (!page && static_cast<int>(kMedium) >= minimum_category)
1565 : page = free_list()->GetPageForCategoryType(kMedium);
1566 27363 : if (!page && static_cast<int>(kSmall) >= minimum_category)
1567 : page = free_list()->GetPageForCategoryType(kSmall);
1568 27363 : if (!page && static_cast<int>(kTiny) >= minimum_category)
1569 : page = free_list()->GetPageForCategoryType(kTiny);
1570 27363 : if (!page && static_cast<int>(kTiniest) >= minimum_category)
1571 : page = free_list()->GetPageForCategoryType(kTiniest);
1572 27363 : if (!page) return nullptr;
1573 21390 : RemovePage(page);
1574 21390 : return page;
1575 : }
1576 :
// Takes ownership of |page|, links it into this space's page list, and adds
// its committed/capacity/allocated bytes to the space's accounting. Returns
// the number of bytes relinked into the free list (mirror of RemovePage).
size_t PagedSpace::AddPage(Page* page) {
  CHECK(page->SweepingDone());
  page->set_owner(this);
  page->InsertAfter(anchor()->prev_page());  // Append at the end of the list.
  AccountCommitted(page->size());
  IncreaseCapacity(page->area_size());
  IncreaseAllocatedBytes(page->allocated_bytes(), page);
  return RelinkFreeListCategories(page);
}
1586 :
// Unlinks |page| from this space and reverses, in the opposite order, every
// accounting step performed by AddPage. Ownership of the page passes to the
// caller.
void PagedSpace::RemovePage(Page* page) {
  CHECK(page->SweepingDone());
  page->Unlink();
  UnlinkFreeListCategories(page);
  DecreaseAllocatedBytes(page->allocated_bytes(), page);
  DecreaseCapacity(page->area_size());
  AccountUncommitted(page->size());
}
1595 :
// Releases the committed-but-unused tail of |page| (beyond its high water
// mark) back to the OS and adjusts the space accounting. Returns the number
// of bytes released.
size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
  size_t unused = page->ShrinkToHighWaterMark();
  accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
  AccountUncommitted(unused);
  return unused;
}
1602 :
// Shrinks all pages of this space to their high water mark. Only valid
// during deserialization, when every page holds immortal immovable objects
// and no further allocation will extend them.
void PagedSpace::ShrinkImmortalImmovablePages() {
  DCHECK(!heap()->deserialization_complete());
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  // Return the current linear allocation area to the free list before
  // shrinking, then drop the free list so freed tails are not reused.
  EmptyAllocationInfo();
  ResetFreeList();

  for (Page* page : *this) {
    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    ShrinkPageToHighWaterMark(page);
  }
}
1614 :
// Grows the space by one page. Returns false when the old generation may
// not grow any further or page allocation fails. The fresh page's area is
// immediately handed to the free list.
bool PagedSpace::Expand() {
  // Always lock against the main space as we can only adjust capacity and
  // pages concurrently for the main paged space.
  base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());

  const int size = AreaSize();

  if (!heap()->CanExpandOldGeneration(size)) return false;

  Page* page =
      heap()->memory_allocator()->AllocatePage(size, this, executable());
  if (page == nullptr) return false;
  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
  AddPage(page);
  Free(page->area_start(), page->area_size());
  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
  return true;
}
1634 :
1635 :
1636 116266 : int PagedSpace::CountTotalPages() {
1637 : int count = 0;
1638 845718 : for (Page* page : *this) {
1639 306593 : count++;
1640 : USE(page);
1641 : }
1642 116266 : return count;
1643 : }
1644 :
1645 :
1646 170400 : void PagedSpace::ResetFreeListStatistics() {
1647 1208300 : for (Page* page : *this) {
1648 : page->ResetFreeListStatistics();
1649 : }
1650 170400 : }
1651 :
// Installs a new linear allocation area [top, limit). Under black
// allocation, objects allocated from this area must be born black, so the
// whole area is pre-marked on its page.
void PagedSpace::SetAllocationInfo(Address top, Address limit) {
  SetTopAndLimit(top, limit);
  if (top != nullptr && top != limit &&
      heap()->incremental_marking()->black_allocation()) {
    Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
  }
}
1659 :
// Lowers the linear allocation area's limit to |new_limit|, returning the
// cut-off tail [new_limit, old_limit) to the free list. Under black
// allocation the tail was pre-marked black, so that marking is undone too.
void PagedSpace::DecreaseLimit(Address new_limit) {
  Address old_limit = limit();
  DCHECK_LE(top(), new_limit);
  DCHECK_GE(old_limit, new_limit);
  if (new_limit != old_limit) {
    SetTopAndLimit(top(), new_limit);
    Free(new_limit, old_limit - new_limit);
    if (heap()->incremental_marking()->black_allocation()) {
      Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
                                                                   old_limit);
    }
  }
}
1673 :
// Computes the limit of a linear allocation area for a request of
// |size_in_bytes| within the free-list node [start, end). The result is
// always in [start + size_in_bytes, end].
Address PagedSpace::ComputeLimit(Address start, Address end,
                                 size_t size_in_bytes) {
  DCHECK_GE(end - start, size_in_bytes);

  if (heap()->inline_allocation_disabled()) {
    // Keep the linear allocation area to fit exactly the requested size.
    return start + size_in_bytes;
  } else if (!allocation_observers_paused_ && !allocation_observers_.empty() &&
             identity() == OLD_SPACE && !is_local()) {
    // Generated code may allocate inline from the linear allocation area for
    // Old Space. To make sure we can observe these allocations, we use a lower
    // limit.
    size_t step = RoundSizeDownToObjectAlignment(
        static_cast<int>(GetNextInlineAllocationStepSize()));
    // Clamp the stepped limit so the area still fits the request but never
    // exceeds the node's end.
    return Max(start + size_in_bytes, Min(start + step, end));
  } else {
    // The entire node can be used as the linear allocation area.
    return end;
  }
}
1694 :
// Arms the next allocation-observer step: records the current top (when
// observers are present) and lowers the limit so the step triggers.
void PagedSpace::StartNextInlineAllocationStep() {
  if (!allocation_observers_paused_ && SupportsInlineAllocation()) {
    // 0 marks "no step pending" when there are no observers.
    top_on_previous_step_ = allocation_observers_.empty() ? 0 : top();
    DecreaseLimit(ComputeLimit(top(), limit(), 0));
  }
}
1701 :
// Marks the current linear allocation area black so that objects allocated
// from it are treated as live. Only valid while black allocation is active.
void PagedSpace::MarkAllocationInfoBlack() {
  DCHECK(heap()->incremental_marking()->black_allocation());
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != nullptr && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->CreateBlackArea(current_top, current_limit);
  }
}
1711 :
// Reverses MarkAllocationInfoBlack: clears the black marking on the current
// linear allocation area.
void PagedSpace::UnmarkAllocationInfo() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != nullptr && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->DestroyBlackArea(current_top, current_limit);
  }
}
1720 :
1721 : // Empty space allocation info, returning unused area to free list.
1722 2586281 : void PagedSpace::EmptyAllocationInfo() {
1723 : // Mark the old linear allocation area with a free space map so it can be
1724 : // skipped when scanning the heap.
1725 : Address current_top = top();
1726 : Address current_limit = limit();
1727 2586281 : if (current_top == nullptr) {
1728 : DCHECK_NULL(current_limit);
1729 2586281 : return;
1730 : }
1731 :
1732 1435988 : if (heap()->incremental_marking()->black_allocation()) {
1733 : Page* page = Page::FromAllocationAreaAddress(current_top);
1734 :
1735 : // Clear the bits in the unused black area.
1736 140053 : if (current_top != current_limit) {
1737 : IncrementalMarking::MarkingState* marking_state =
1738 : heap()->incremental_marking()->marking_state();
1739 : marking_state->bitmap(page)->ClearRange(
1740 : page->AddressToMarkbitIndex(current_top),
1741 214124 : page->AddressToMarkbitIndex(current_limit));
1742 : marking_state->IncrementLiveBytes(
1743 107062 : page, -static_cast<int>(current_limit - current_top));
1744 : }
1745 : }
1746 :
1747 1435988 : if (top_on_previous_step_) {
1748 : DCHECK(current_top >= top_on_previous_step_);
1749 174243 : AllocationStep(static_cast<int>(current_top - top_on_previous_step_),
1750 174243 : nullptr, 0);
1751 174243 : top_on_previous_step_ = 0;
1752 : }
1753 : SetTopAndLimit(nullptr, nullptr);
1754 : DCHECK_GE(current_limit, current_top);
1755 1435985 : Free(current_top, current_limit - current_top);
1756 : }
1757 :
// Releases an empty page (no live bytes) back to the memory allocator:
// evicts its free-list entries, drops any allocation area pointing into it,
// unlinks it, and queues it for freeing.
void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(
      0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
             page));
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  // If the linear allocation area lives on this page, invalidate it so no
  // further bump-pointer allocation lands in freed memory.
  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    DCHECK(!top_on_previous_step_);
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != nullptr) {
    DCHECK_NOT_NULL(page->prev_chunk());
    page->Unlink();
  }
  AccountUncommitted(page->size());
  accounting_stats_.DecreaseCapacity(page->area_size());
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
1781 :
1782 0 : void PagedSpace::SetReadAndExecutable() {
1783 : DCHECK(identity() == CODE_SPACE);
1784 0 : for (Page* page : *this) {
1785 0 : page->SetReadAndExecutable();
1786 : }
1787 0 : }
1788 :
1789 0 : void PagedSpace::SetReadAndWritable() {
1790 : DCHECK(identity() == CODE_SPACE);
1791 0 : for (Page* page : *this) {
1792 0 : page->SetReadAndWritable();
1793 : }
1794 0 : }
1795 :
1796 33279 : std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
1797 66558 : return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
1798 : }
1799 :
#ifdef DEBUG
// Debug-only printer; intentionally a no-op for paged spaces.
void PagedSpace::Print() {}
#endif
1803 :
#ifdef VERIFY_HEAP
// Verifies the structural invariants of this space: every page is owned and
// swept, objects are laid out back-to-back within page bounds, each object
// has a valid map in map space, and the allocation pointer lies in the space.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // An empty allocation area (top == limit) trivially "belongs" to the space.
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  for (Page* page : *this) {
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    for (HeapObject* object = it.Next(); object != nullptr;
         object = it.Next()) {
      // Objects must not overlap.
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      if (!FLAG_verify_heap_skip_remembered_set) {
        heap()->VerifyRememberedSetFor(object);
      }

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
  }
  CHECK(allocation_pointer_found_in_space);
#ifdef DEBUG
  VerifyCountersAfterSweeping();
#endif
}

// Verifies that the black-marked bytes on each swept page never exceed the
// page's recorded live-bytes counter.
void PagedSpace::VerifyLiveBytes() {
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  for (Page* page : *this) {
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != nullptr;
         object = it.Next()) {
      // All the interior pointers should be contained in the heap.
      if (marking_state->IsBlack(object)) {
        black_size += object->Size();
      }
    }
    CHECK_LE(black_size, marking_state->live_bytes(page));
  }
}
#endif  // VERIFY_HEAP
1868 :
#ifdef DEBUG
// Cross-checks the space's accounting stats against the per-page counters
// once sweeping has finished and allocated_bytes() is exact.
void PagedSpace::VerifyCountersAfterSweeping() {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  for (Page* page : *this) {
    DCHECK(page->SweepingDone());
    total_capacity += page->area_size();
    HeapObjectIterator it(page);
    size_t real_allocated = 0;
    for (HeapObject* object = it.Next(); object != nullptr;
         object = it.Next()) {
      if (!object->IsFiller()) {
        real_allocated += object->Size();
      }
    }
    total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming,
    // object slack tracking happened after sweeping.
    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}

// Same cross-check as above, but usable while concurrent sweeping is still
// running; live_bytes stands in for allocated_bytes on unswept pages.
void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
  // We need to refine the counters on pages that are already swept and have
  // not been moved over to the actual space. Otherwise, the AccountingStats
  // are just an over approximation.
  RefillFreeList();

  size_t total_capacity = 0;
  size_t total_allocated = 0;
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* page : *this) {
    size_t page_allocated =
        page->SweepingDone()
            ? page->allocated_bytes()
            : static_cast<size_t>(marking_state->live_bytes(page));
    total_capacity += page->area_size();
    total_allocated += page_allocated;
    DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif
1917 :
1918 : // -----------------------------------------------------------------------------
1919 : // NewSpace implementation
1920 :
// Sets up the new space's two semispaces, commits the to-space, resets the
// allocation area, and allocates the per-instance-type histogram arrays.
// Returns false if the to-space cannot be committed.
bool NewSpace::SetUp(size_t initial_semispace_capacity,
                     size_t maximum_semispace_capacity) {
  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(base::bits::IsPowerOfTwo(
      static_cast<uint32_t>(maximum_semispace_capacity)));

  to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
  from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
  ResetAllocationInfo();

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  return true;
}
1946 :
1947 :
// Tears down the new space: frees the histogram arrays, invalidates the
// allocation area, and releases both semispaces.
void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = nullptr;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = nullptr;
  }

  allocation_info_.Reset(nullptr, nullptr);

  to_space_.TearDown();
  from_space_.TearDown();
}
1963 :
// Exchanges the from- and to-semispaces (used when evacuating new space).
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1965 :
1966 :
// Grows both semispaces by the configured growth factor, capped at maximum
// capacity. The two semispaces must stay the same size, so a failure to
// grow from-space rolls back the to-space growth.
void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  size_t new_capacity =
      Min(MaximumCapacity(),
          static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1987 :
1988 :
// Shrinks both semispaces towards 2x the currently used size (never below
// the initial capacity). Mirrors Grow(): both semispaces must end up the
// same size, so a failed from-space shrink re-grows to-space.
void NewSpace::Shrink() {
  // Keep head-room of twice the live size so the next scavenge fits.
  size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
  size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
2008 :
// Brings both semispaces back to their target page count after a GC.
// Returns false if a needed page could not be allocated.
bool NewSpace::Rebalance() {
  // Order here is important to make use of the page pool.
  return to_space_.EnsureCurrentCapacity() &&
         from_space_.EnsureCurrentCapacity();
}
2014 :
// Adjusts the number of pages in a committed semispace to match
// current_capacity_: surplus pages are freed (pooled), missing pages are
// allocated, cleared, and filled with a filler object. Returns false only
// when a page allocation fails.
bool SemiSpace::EnsureCurrentCapacity() {
  if (is_committed()) {
    const int expected_pages =
        static_cast<int>(current_capacity_ / Page::kPageSize);
    int actual_pages = 0;
    Page* current_page = anchor()->next_page();
    // First pass: count pages and drop any beyond the expected count.
    while (current_page != anchor()) {
      actual_pages++;
      current_page = current_page->next_page();
      if (actual_pages > expected_pages) {
        Page* to_remove = current_page->prev_page();
        // Make sure we don't overtake the actual top pointer.
        CHECK_NE(to_remove, current_page_);
        to_remove->Unlink();
        // Clear new space flags to avoid this page being treated as a new
        // space page that is potentially being swept.
        to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
        heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
            to_remove);
      }
    }
    // Second pass: top up with freshly allocated (pooled) pages.
    IncrementalMarking::NonAtomicMarkingState* marking_state =
        heap()->incremental_marking()->non_atomic_marking_state();
    while (actual_pages < expected_pages) {
      actual_pages++;
      current_page =
          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
              Page::kAllocatableMemory, this, executable());
      if (current_page == nullptr) return false;
      DCHECK_NOT_NULL(current_page);
      current_page->InsertAfter(anchor());
      marking_state->ClearLiveness(current_page);
      current_page->SetFlags(anchor()->prev_page()->GetFlags(),
                             static_cast<uintptr_t>(Page::kCopyAllFlags));
      heap()->CreateFillerObjectAt(current_page->area_start(),
                                   static_cast<int>(current_page->area_size()),
                                   ClearRecordedSlots::kNo);
    }
  }
  return true;
}
2056 :
// Closes the buffer: fills its unused remainder with a filler object so the
// heap stays iterable, invalidates it, and returns the old allocation info
// (an invalid info when the buffer was already closed).
AllocationInfo LocalAllocationBuffer::Close() {
  if (IsValid()) {
    heap_->CreateFillerObjectAt(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearRecordedSlots::kNo);
    const AllocationInfo old_info = allocation_info_;
    allocation_info_ = AllocationInfo(nullptr, nullptr);
    return old_info;
  }
  return AllocationInfo(nullptr, nullptr);
}
2069 :
2070 :
// Constructs a buffer over |allocation_info|. A valid area is immediately
// covered with a filler object so the heap remains iterable while the
// buffer bump-allocates into it.
LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
                                             AllocationInfo allocation_info)
    : heap_(heap), allocation_info_(allocation_info) {
  if (IsValid()) {
    heap_->CreateFillerObjectAt(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearRecordedSlots::kNo);
  }
}
2081 :
2082 :
2083 186055 : LocalAllocationBuffer::LocalAllocationBuffer(
2084 : const LocalAllocationBuffer& other) {
2085 : *this = other;
2086 186057 : }
2087 :
2088 :
// Assignment transfers ownership of the allocation area: this buffer is
// closed first, then takes over |other|'s area, and |other| is invalidated.
LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    const LocalAllocationBuffer& other) {
  Close();
  heap_ = other.heap_;
  allocation_info_ = other.allocation_info_;

  // This is needed since we (a) cannot yet use move-semantics, and (b) want
  // to make the use of the class easy by it as value and (c) implicitly call
  // {Close} upon copy.
  const_cast<LocalAllocationBuffer&>(other)
      .allocation_info_.Reset(nullptr, nullptr);
  return *this;
}
2102 :
2103 :
// Points the allocation area at the current to-space page and records the
// original top/limit (read by concurrent components) before adjusting the
// inline allocation limit.
void NewSpace::UpdateAllocationInfo() {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
  original_top_.SetValue(top());
  original_limit_.SetValue(limit());
  UpdateInlineAllocationLimit(0);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
2112 :
2113 :
// Rewinds allocation to the start of to-space, clears mark bits on all
// to-space pages, and notifies allocation observers about bytes allocated
// since the last step.
void NewSpace::ResetAllocationInfo() {
  Address old_top = allocation_info_.top();
  to_space_.Reset();
  UpdateAllocationInfo();
  // Clear all mark-bits in the to-space.
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* p : to_space_) {
    marking_state->ClearLiveness(p);
    // Concurrent marking may have local live bytes for this page.
    heap()->concurrent_marking()->ClearLiveness(p);
  }
  InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
}
2128 :
2129 :
// Recomputes the allocation limit, never past the current page's end:
// exactly |size_in_bytes| when inline allocation is disabled, the page end
// normally, or a lowered limit so allocation observers get a step.
void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
  if (heap()->inline_allocation_disabled()) {
    // Lowest limit when linear allocation was disabled.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    allocation_info_.set_limit(Min(new_top, high));
  } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
    // Normal limit is the end of the current page.
    allocation_info_.set_limit(to_space_.page_high());
  } else {
    // Lower limit during incremental marking.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
    allocation_info_.set_limit(Min(new_limit, high));
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
2148 :
2149 :
// Advances allocation to the next to-space page. The remainder of the
// current page is filled so the heap stays iterable. Returns false when
// to-space has no more pages.
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top();
  DCHECK(!Page::IsAtObjectStart(top));
  if (!to_space_.AdvancePage()) {
    // No more pages left to advance.
    return false;
  }

  // Clear remainder of current page.
  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
  UpdateAllocationInfo();

  return true;
}
2166 :
2167 :
// Mutex-protected variant of AddFreshPage for callers racing on new space.
bool NewSpace::AddFreshPageSynchronized() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  return AddFreshPage();
}
2172 :
2173 :
// Makes sure the allocation area can fit |size_in_bytes| (plus alignment
// fill), advancing to a fresh page if necessary and notifying allocation
// observers. Returns false when new space is exhausted.
bool NewSpace::EnsureAllocation(int size_in_bytes,
                                AllocationAlignment alignment) {
  Address old_top = allocation_info_.top();
  Address high = to_space_.page_high();
  int filler_size = Heap::GetFillToAlign(old_top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (old_top + aligned_size_in_bytes > high) {
    // Not enough room in the page, try to allocate a new one.
    if (!AddFreshPage()) {
      return false;
    }

    // Account the bytes allocated on the old page before switching.
    InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);

    // Re-derive top/fill for the fresh page.
    old_top = allocation_info_.top();
    high = to_space_.page_high();
    filler_size = Heap::GetFillToAlign(old_top, alignment);
  }

  DCHECK(old_top + aligned_size_in_bytes <= high);

  if (allocation_info_.limit() < high) {
    // Either the limit has been lowered because linear allocation was disabled
    // or because incremental marking wants to get a chance to do a step,
    // or because idle scavenge job wants to get a chance to post a task.
    // Set the new limit accordingly.
    Address new_top = old_top + aligned_size_in_bytes;
    Address soon_object = old_top + filler_size;
    InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
    UpdateInlineAllocationLimit(aligned_size_in_bytes);
  }
  return true;
}
2208 :
2209 :
// Arms the next allocation-observer step for new space; no-op while
// observers are paused. 0 marks "no step pending".
void NewSpace::StartNextInlineAllocationStep() {
  if (!allocation_observers_paused_) {
    top_on_previous_step_ =
        !allocation_observers_.empty() ? allocation_info_.top() : 0;
    UpdateInlineAllocationLimit(0);
  }
}
2217 :
// Pauses allocation observers, first flushing the bytes allocated since the
// last step, then restoring the un-lowered allocation limit.
void NewSpace::PauseAllocationObservers() {
  // Do a step to account for memory allocated so far.
  InlineAllocationStep(top(), top(), nullptr, 0);
  Space::PauseAllocationObservers();
  top_on_previous_step_ = 0;
  UpdateInlineAllocationLimit(0);
}
2225 :
// Pauses allocation observers for a paged space, flushing the bytes
// allocated since the last observer step first.
void PagedSpace::PauseAllocationObservers() {
  // Do a step to account for memory allocated so far.
  if (top_on_previous_step_) {
    int bytes_allocated = static_cast<int>(top() - top_on_previous_step_);
    AllocationStep(bytes_allocated, nullptr, 0);
  }
  Space::PauseAllocationObservers();
  top_on_previous_step_ = 0;
}
2235 :
// Resumes allocation observers and re-arms the next observer step.
void NewSpace::ResumeAllocationObservers() {
  DCHECK_NULL(top_on_previous_step_);
  Space::ResumeAllocationObservers();
  StartNextInlineAllocationStep();
}
2241 :
// TODO(ofrobots): refactor into SpaceWithLinearArea
// Resumes allocation observers and re-arms the next observer step
// (identical in shape to the NewSpace variant above).
void PagedSpace::ResumeAllocationObservers() {
  DCHECK_NULL(top_on_previous_step_);
  Space::ResumeAllocationObservers();
  StartNextInlineAllocationStep();
}
2248 :
// Notifies every allocation observer about the bytes allocated since the
// previous step ([top_on_previous_step_, top)) and rebases the step at
// |new_top|. No-op when no step is pending (top_on_previous_step_ == 0).
void NewSpace::InlineAllocationStep(Address top, Address new_top,
                                    Address soon_object, size_t size) {
  if (top_on_previous_step_) {
    int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
    for (AllocationObserver* observer : allocation_observers_) {
      observer->AllocationStep(bytes_allocated, soon_object, size);
    }
    top_on_previous_step_ = new_top;
  }
}
2259 :
2260 11093 : std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
2261 11093 : return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
2262 : }
2263 :
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
// Walks to-space from its start up to top(), checking that objects are
// packed back-to-back, have valid maps, are neither code nor maps, and that
// both semispaces are internally consistent.
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!Page::IsAlignedToPageSize(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsAbstractCode());

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      Page* page = Page::FromAllocationAreaAddress(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif
2319 :
2320 : // -----------------------------------------------------------------------------
2321 : // SemiSpace implementation
2322 :
// Records the page-aligned capacity bounds for this semispace; no memory is
// committed here (see Commit()).
void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
  DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
  minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  current_capacity_ = minimum_capacity_;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  committed_ = false;
}
2330 :
2331 :
// Releases the semispace: frees array buffers tracked on its pages, then
// uncommits the memory so allocator counters stay in sync.
void SemiSpace::TearDown() {
  // Properly uncommit memory to keep the allocator counters in sync.
  if (is_committed()) {
    for (Page* p : *this) {
      ArrayBufferTracker::FreeAll(p);
    }
    Uncommit();
  }
  current_capacity_ = maximum_capacity_ = 0;
}
2342 :
2343 :
// Commits the semispace by allocating current_capacity_ worth of (pooled)
// pages and linking them behind the anchor. On allocation failure the pages
// added so far are rewound and false is returned.
bool SemiSpace::Commit() {
  DCHECK(!is_committed());
  Page* current = anchor();
  const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
  for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    Page* new_page =
        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
            Page::kAllocatableMemory, this, executable());
    if (new_page == nullptr) {
      RewindPages(current, pages_added);
      return false;
    }
    new_page->InsertAfter(current);
    current = new_page;
  }
  Reset();
  AccountCommitted(current_capacity_);
  // The age mark is only initialized on first commit; later commits keep it.
  if (age_mark_ == nullptr) {
    age_mark_ = first_page()->area_start();
  }
  committed_ = true;
  return true;
}
2367 :
2368 :
// Uncommits the semispace: returns every page to the pool/free queue,
// empties the page list, and updates the committed accounting.
bool SemiSpace::Uncommit() {
  DCHECK(is_committed());
  // Advance the iterator before freeing the page it points at.
  for (auto it = begin(); it != end();) {
    Page* p = *(it++);
    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());
  AccountUncommitted(current_capacity_);
  committed_ = false;
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  return true;
}
2382 :
2383 :
2384 7 : size_t SemiSpace::CommittedPhysicalMemory() {
2385 7 : if (!is_committed()) return 0;
2386 : size_t size = 0;
2387 42 : for (Page* p : *this) {
2388 14 : size += p->CommittedPhysicalMemory();
2389 : }
2390 7 : return size;
2391 : }
2392 :
// Grows the semispace to |new_capacity| (page-aligned, at most
// maximum_capacity_) by appending freshly allocated pages that inherit the
// flip-relevant flags of the current last page. Rolls back and returns
// false if a page allocation fails.
bool SemiSpace::GrowTo(size_t new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
  DCHECK_LE(new_capacity, maximum_capacity_);
  DCHECK_GT(new_capacity, current_capacity_);
  const size_t delta = new_capacity - current_capacity_;
  DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
  const int delta_pages = static_cast<int>(delta / Page::kPageSize);
  Page* last_page = anchor()->prev_page();
  DCHECK_NE(last_page, anchor());
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    Page* new_page =
        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
            Page::kAllocatableMemory, this, executable());
    if (new_page == nullptr) {
      // Unlink the pages added by this call before failing.
      RewindPages(last_page, pages_added);
      return false;
    }
    new_page->InsertAfter(last_page);
    // Fresh pages start with empty liveness information.
    marking_state->ClearLiveness(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
    last_page = new_page;
  }
  AccountCommitted(delta);
  current_capacity_ = new_capacity;
  return true;
}
2425 :
// Unlinks |num_pages| pages from the doubly-linked page list, walking
// backwards from |start|. Used to roll back a partially completed
// Commit()/GrowTo(). NOTE(review): only the list links are severed here;
// the page memory itself is presumably reclaimed elsewhere — confirm
// against the callers.
void SemiSpace::RewindPages(Page* start, int num_pages) {
  Page* new_last_page = nullptr;
  Page* last_page = start;
  while (num_pages > 0) {
    DCHECK_NE(last_page, anchor());
    new_last_page = last_page->prev_page();
    last_page->prev_page()->set_next_page(last_page->next_page());
    last_page->next_page()->set_prev_page(last_page->prev_page());
    last_page = new_last_page;
    num_pages--;
  }
}
2438 :
// Shrinks the semispace to |new_capacity| (page-aligned, at least
// minimum_capacity_) by unlinking and freeing pages from the tail of the
// page list. If the space is not committed, only the capacity bookkeeping
// changes.
bool SemiSpace::ShrinkTo(size_t new_capacity) {
  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
  DCHECK_GE(new_capacity, minimum_capacity_);
  DCHECK_LT(new_capacity, current_capacity_);
  if (is_committed()) {
    const size_t delta = current_capacity_ - new_capacity;
    DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
    int delta_pages = static_cast<int>(delta / Page::kPageSize);
    Page* new_last_page;
    Page* last_page;
    while (delta_pages > 0) {
      // Unlink the last page before handing it back to the allocator.
      last_page = anchor()->prev_page();
      new_last_page = last_page->prev_page();
      new_last_page->set_next_page(anchor());
      anchor()->set_prev_page(new_last_page);
      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
          last_page);
      delta_pages--;
    }
    AccountUncommitted(delta);
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  }
  current_capacity_ = new_capacity;
  return true;
}
2464 :
// Re-establishes ownership and page flags after the anchors were swapped
// (see SemiSpace::Swap): applies |flags| under |mask| to every page and
// sets the IN_TO_SPACE/IN_FROM_SPACE flags according to this space's id_.
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
  anchor_.set_owner(this);
  // Repair the neighbors' links to the (moved) anchor.
  anchor_.prev_page()->set_next_page(&anchor_);
  anchor_.next_page()->set_prev_page(&anchor_);

  for (Page* page : *this) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (id_ == kToSpace) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      // A fresh to-space page has no live bytes yet.
      heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
          page, 0);
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
  }
}
2487 :
2488 :
// Resets the allocation bookkeeping to the first page of the semispace.
void SemiSpace::Reset() {
  DCHECK_NE(anchor_.next_page(), &anchor_);
  current_page_ = anchor_.next_page();
  pages_used_ = 0;
}
2494 :
// Unlinks |page| from this semispace. If it was the current allocation
// page, allocation continues on its predecessor.
void SemiSpace::RemovePage(Page* page) {
  if (current_page_ == page) {
    current_page_ = page->prev_page();
  }
  page->Unlink();
}
2501 :
// Inserts |page| right after the anchor (front of the page list), copying
// all flags from the current allocation page and taking ownership.
void SemiSpace::PrependPage(Page* page) {
  page->SetFlags(current_page()->GetFlags(),
                 static_cast<uintptr_t>(Page::kCopyAllFlags));
  page->set_owner(this);
  page->InsertAfter(anchor());
  pages_used_++;
}
2509 :
// Flips from- and to-space by swapping every field except id_, then fixes
// up the page flags of both spaces; the flip-relevant flags of the old
// to-space are carried over to the new to-space.
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
  DCHECK_NE(to->anchor_.next_page(), &to->anchor_);

  // Save before the swap; used to re-flag the new to-space below.
  intptr_t saved_to_space_flags = to->current_page()->GetFlags();

  // We swap all properties but id_.
  std::swap(from->current_capacity_, to->current_capacity_);
  std::swap(from->maximum_capacity_, to->maximum_capacity_);
  std::swap(from->minimum_capacity_, to->minimum_capacity_);
  std::swap(from->age_mark_, to->age_mark_);
  std::swap(from->committed_, to->committed_);
  std::swap(from->anchor_, to->anchor_);
  std::swap(from->current_page_, to->current_page_);

  // Swapping the anchors invalidated owner pointers and flags; repair both
  // spaces.
  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
  from->FixPagesFlags(0, 0);
}
2529 :
// Records the age mark and flags every page up to the one containing
// |mark| as being below it.
void SemiSpace::set_age_mark(Address mark) {
  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
  for (Page* p : PageRange(space_start(), mark)) {
    p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}
2538 :
// Intentionally unsupported on a bare semispace.
std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
  // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
  UNREACHABLE();
}
2543 :
#ifdef DEBUG
// Debug printing for semispaces is intentionally a no-op.
void SemiSpace::Print() {}
#endif
2547 :
#ifdef VERIFY_HEAP
// Verifies page-list consistency and the per-page flags of this semispace:
// ownership, from-/to-space flags, and write-barrier flags.
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  Page* page = anchor_.next_page();
  CHECK(anchor_.owner() == this);
  while (page != &anchor_) {
    CHECK_EQ(page->owner(), this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(
            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
    }
    // Doubly-linked list invariant.
    CHECK_EQ(page->prev_page()->next_page(), page);
    page = page->next_page();
  }
}
#endif
2576 :
#ifdef DEBUG
// Checks that [start, end] lie in the same semispace and that start does
// not come after end in the page list.
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to same semi-space
  Page* page = Page::FromAllocationAreaAddress(start);
  Page* end_page = Page::FromAllocationAreaAddress(end);
  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
  DCHECK_EQ(space, end_page->owner());
  // Start address is before end address, either on same page,
  // or end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    DCHECK_LE(start, end);
  } else {
    while (page != end_page) {
      page = page->next_page();
      DCHECK_NE(page, space->anchor());
    }
  }
}
#endif
2597 :
2598 :
2599 : // -----------------------------------------------------------------------------
2600 : // SemiSpaceIterator implementation.
2601 :
// Iterates the objects between the new space's bottom and its current top.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->bottom(), space->top());
}
2605 :
2606 :
// Sets the iteration window to [start, end); both bounds must lie in the
// same semispace.
void SemiSpaceIterator::Initialize(Address start, Address end) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
}
2612 :
#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}

// Records |obj| in the shared per-instance-type histogram (count and
// bytes) and returns the object's size.
static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = obj->GetIsolate();
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  DCHECK_NOT_NULL(isolate->heap_histograms()[type].name());
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)
        ->IncrementSpillStatistics(isolate->js_spill_information());
  }

  return obj->Size();
}


// Prints the collected histogram; all string types are folded into one
// STRING_TYPE line.
static void ReportHistogram(Isolate* isolate, bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF(" %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += isolate->heap_histograms()[type].number(); \
  string_bytes += isolate->heap_histograms()[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
#endif  // DEBUG
2675 :
2676 :
2677 : // Support for statistics gathering for --heap-stats and --log-gc.
// Clears both the allocated and the promoted per-type histograms.
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}
2684 :
2685 :
2686 : // Because the copying collector does not touch garbage objects, we iterate
2687 : // the new space before a collection to get a histogram of allocated objects.
2688 : // This only happens when --log-gc flag is set.
2689 0 : void NewSpace::CollectStatistics() {
2690 : ClearHistograms();
2691 : SemiSpaceIterator it(this);
2692 0 : for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next())
2693 0 : RecordAllocation(obj);
2694 0 : }
2695 :
2696 :
2697 0 : static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2698 : const char* description) {
2699 0 : LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2700 : // Lump all the string types together.
2701 : int string_number = 0;
2702 : int string_bytes = 0;
2703 : #define INCREMENT(type, size, name, camel_name) \
2704 : string_number += info[type].number(); \
2705 : string_bytes += info[type].bytes();
2706 0 : STRING_TYPE_LIST(INCREMENT)
2707 : #undef INCREMENT
2708 0 : if (string_number > 0) {
2709 0 : LOG(isolate,
2710 : HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2711 : }
2712 :
2713 : // Then do the other types.
2714 0 : for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2715 0 : if (info[i].number() > 0) {
2716 0 : LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2717 : info[i].bytes()));
2718 : }
2719 : }
2720 0 : LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2721 0 : }
2722 :
2723 :
2724 0 : void NewSpace::ReportStatistics() {
2725 : #ifdef DEBUG
2726 : if (FLAG_heap_stats) {
2727 : float pct = static_cast<float>(Available()) / TotalCapacity();
2728 : PrintF(" capacity: %" PRIuS ", available: %" PRIuS ", %%%d\n",
2729 : TotalCapacity(), Available(), static_cast<int>(pct * 100));
2730 : PrintF("\n Object Histogram:\n");
2731 : for (int i = 0; i <= LAST_TYPE; i++) {
2732 : if (allocated_histogram_[i].number() > 0) {
2733 : PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2734 : allocated_histogram_[i].number(),
2735 : allocated_histogram_[i].bytes());
2736 : }
2737 : }
2738 : PrintF("\n");
2739 : }
2740 : #endif // DEBUG
2741 :
2742 0 : if (FLAG_log_gc) {
2743 0 : Isolate* isolate = heap()->isolate();
2744 0 : DoReportStatistics(isolate, allocated_histogram_, "allocated");
2745 0 : DoReportStatistics(isolate, promoted_histogram_, "promoted");
2746 : }
2747 0 : }
2748 :
2749 :
// Adds |obj| to the histogram of objects allocated in new space.
void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}
2756 :
2757 :
// Adds |obj| to the histogram of objects promoted out of new space.
void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  DCHECK(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
2764 :
2765 :
2766 6 : size_t NewSpace::CommittedPhysicalMemory() {
2767 6 : if (!base::OS::HasLazyCommits()) return CommittedMemory();
2768 6 : MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2769 6 : size_t size = to_space_.CommittedPhysicalMemory();
2770 6 : if (from_space_.is_committed()) {
2771 1 : size += from_space_.CommittedPhysicalMemory();
2772 : }
2773 6 : return size;
2774 : }
2775 :
2776 :
2777 : // -----------------------------------------------------------------------------
2778 : // Free lists for old object spaces implementation
2779 :
2780 :
// Empties the category: drops the node list, unlinks it from its siblings,
// and zeroes the available-bytes counter.
void FreeListCategory::Reset() {
  set_top(nullptr);
  set_prev(nullptr);
  set_next(nullptr);
  available_ = 0;
}
2787 :
// Pops the head node of this category's list, storing its size in
// |*node_size| and reducing available_ accordingly; returns nullptr if the
// list is empty.
FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
  DCHECK(page()->CanAllocate());

  FreeSpace* node = top();
  if (node == nullptr) return nullptr;
  set_top(node->next());
  *node_size = node->Size();
  available_ -= *node_size;
  return node;
}
2798 :
// Like PickNodeFromList, but only succeeds if the head node has at least
// |minimum_size| bytes; a too-small node is returned to the list and
// nullptr is reported.
FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
                                                 size_t* node_size) {
  DCHECK(page()->CanAllocate());

  FreeSpace* node = PickNodeFromList(node_size);
  if ((node != nullptr) && (*node_size < minimum_size)) {
    Free(node, *node_size, kLinkCategory);
    *node_size = 0;
    return nullptr;
  }
  return node;
}
2811 :
// Linearly scans this category's list for the first node of at least
// |minimum_size| bytes; unlinks and returns it, or nullptr if none fits.
FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
                                                 size_t* node_size) {
  DCHECK(page()->CanAllocate());

  FreeSpace* prev_non_evac_node = nullptr;
  for (FreeSpace* cur_node = top(); cur_node != nullptr;
       cur_node = cur_node->next()) {
    size_t size = cur_node->size();
    if (size >= minimum_size) {
      DCHECK_GE(available_, size);
      available_ -= size;
      // Unlink: either the node is the head, or its predecessor is
      // rewired around it.
      if (cur_node == top()) {
        set_top(cur_node->next());
      }
      if (prev_non_evac_node != nullptr) {
        prev_non_evac_node->set_next(cur_node->next());
      }
      *node_size = size;
      return cur_node;
    }

    prev_non_evac_node = cur_node;
  }
  return nullptr;
}
2837 :
// Prepends |free_space| (of |size_in_bytes|) to this category's list. With
// kLinkCategory the category links itself into its owner's free list if it
// is not linked yet.
void FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
                            FreeMode mode) {
  CHECK(page()->CanAllocate());
  free_space->set_next(top());
  set_top(free_space);
  available_ += size_in_bytes;
  if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
    owner()->AddCategory(this);
  }
}
2848 :
2849 :
// After deserialization, free-space nodes may still carry a null map
// pointer; patch each one to the real free-space map.
void FreeListCategory::RepairFreeList(Heap* heap) {
  FreeSpace* n = top();
  while (n != nullptr) {
    Map** map_location = reinterpret_cast<Map**>(n->address());
    if (*map_location == nullptr) {
      *map_location = heap->free_space_map();
    } else {
      DCHECK(*map_location == heap->free_space_map());
    }
    n = n->next();
  }
}
2862 :
// Re-links this (currently unlinked) category into its owner's free list.
void FreeListCategory::Relink() {
  DCHECK(!is_linked());
  owner()->AddCategory(this);
}
2867 :
// Empties the category and marks it invalid so it is skipped from now on.
void FreeListCategory::Invalidate() {
  Reset();
  type_ = kInvalidCategory;
}
2872 :
// Creates an empty free list for |owner| with no linked categories.
FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i] = nullptr;
  }
  Reset();
}
2879 :
2880 :
// Drops all free-list entries: resets every linked category, clears the
// per-type category heads, and resets the allocation statistics.
void FreeList::Reset() {
  ForAllFreeListCategories(
      [](FreeListCategory* category) { category->Reset(); });
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i] = nullptr;
  }
  ResetStats();
}
2889 :
// Returns the memory [start, start + size_in_bytes) to the free list. The
// area is first turned into a filler object. Blocks smaller than
// kMinBlockSize cannot hold a free-list node and are accounted as wasted;
// the return value is the number of bytes wasted (0 when the block was
// added to a category).
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
  if (size_in_bytes == 0) return 0;

  owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
                                        ClearRecordedSlots::kNo);

  Page* page = Page::FromAddress(start);
  page->DecreaseAllocatedBytes(size_in_bytes);

  // Blocks have to be a minimum size to hold free list items.
  if (size_in_bytes < kMinBlockSize) {
    page->add_wasted_memory(size_in_bytes);
    wasted_bytes_.Increment(size_in_bytes);
    return size_in_bytes;
  }

  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
  page->free_list_category(type)->Free(free_space, size_in_bytes, mode);
  DCHECK_EQ(page->AvailableInFreeList(),
            page->AvailableInFreeListFromAllocatedBytes());
  return 0;
}
2915 :
// Fast path: pops a node from the first non-empty category of |type|.
// Categories found empty along the way are unlinked from the free list.
FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
  FreeListCategoryIterator it(this, type);
  FreeSpace* node = nullptr;
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    node = current->PickNodeFromList(node_size);
    if (node != nullptr) {
      DCHECK(IsVeryLong() || Available() == SumFreeLists());
      return node;
    }
    RemoveCategory(current);
  }
  return node;
}
2930 :
// Tries only the head category of |type| for a node of at least
// |minimum_size| bytes; returns nullptr if there is none.
FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
                                   size_t minimum_size) {
  if (categories_[type] == nullptr) return nullptr;
  FreeSpace* node =
      categories_[type]->TryPickNodeFromList(minimum_size, node_size);
  if (node != nullptr) {
    DCHECK(IsVeryLong() || Available() == SumFreeLists());
  }
  return node;
}
2941 :
// Linear search over all categories of |type| for a node of at least
// |minimum_size| bytes; categories that turn out empty are unlinked.
FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
                                         size_t* node_size,
                                         size_t minimum_size) {
  FreeListCategoryIterator it(this, type);
  FreeSpace* node = nullptr;
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    node = current->SearchForNodeInList(minimum_size, node_size);
    if (node != nullptr) {
      DCHECK(IsVeryLong() || Available() == SumFreeLists());
      return node;
    }
    if (current->is_empty()) {
      RemoveCategory(current);
    }
  }
  return node;
}
2960 :
// Finds a node of at least |size_in_bytes|: first the constant-time fast
// path over increasingly large categories, then a linear walk of the huge
// list, and finally a try at the exactly matching category. The winning
// node's bytes are re-accounted as allocated on its page.
FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
  FreeSpace* node = nullptr;

  // First try the allocation fast path: try to allocate the minimum element
  // size of a free list category. This operation is constant time.
  FreeListCategoryType type =
      SelectFastAllocationFreeListCategoryType(size_in_bytes);
  for (int i = type; i < kHuge && node == nullptr; i++) {
    node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
  }

  if (node == nullptr) {
    // Next search the huge list for free list nodes. This takes linear time
    // in the number of huge elements.
    node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
  }

  if (node == nullptr && type != kHuge) {
    // We didn't find anything in the huge list. Now search the best fitting
    // free list for a node that has at least the requested size.
    type = SelectFreeListCategoryType(size_in_bytes);
    node = TryFindNodeIn(type, node_size, size_in_bytes);
  }

  if (node != nullptr) {
    Page::FromAddress(node->address())->IncreaseAllocatedBytes(*node_size);
  }

  DCHECK(IsVeryLong() || Available() == SumFreeLists());
  return node;
}
2992 :
// Refills the owner's linear allocation area from the free list. Returns
// false when no node of at least |size_in_bytes| could be found; on
// success the owner's top/limit are set and any unused tail beyond the
// computed limit is returned to the free list.
bool FreeList::Allocate(size_t size_in_bytes) {
  DCHECK_GE(kMaxBlockSize, size_in_bytes);
  DCHECK(IsAligned(size_in_bytes, kPointerSize));
  DCHECK_LE(owner_->top(), owner_->limit());
#ifdef DEBUG
  if (owner_->top() != owner_->limit()) {
    DCHECK_EQ(Page::FromAddress(owner_->top()),
              Page::FromAddress(owner_->limit() - 1));
  }
#endif
  // Don't free list allocate if there is linear space available.
  DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
            size_in_bytes);

  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap. This also puts it back in the free list
  // if it is big enough.
  owner_->EmptyAllocationInfo();

  if (!owner_->is_local()) {
    owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
        Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
  }

  size_t new_node_size = 0;
  FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == nullptr) return false;

  DCHECK_GE(new_node_size, size_in_bytes);

#ifdef DEBUG
  // Zap the requested area so stale pointers are easy to spot.
  for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =
        Smi::FromInt(kCodeZapValue);
  }
#endif

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Memory in the linear allocation area is counted as allocated. We may free
  // a little of this again immediately - see below.
  owner_->IncreaseAllocatedBytes(new_node_size,
                                 Page::FromAddress(new_node->address()));

  Address start = new_node->address();
  Address end = new_node->address() + new_node_size;
  Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
  DCHECK_LE(limit, end);
  DCHECK_LE(size_in_bytes, limit - start);
  if (limit != end) {
    // Give the tail beyond the chosen limit back to the free list.
    owner_->Free(limit, end - limit);
  }
  owner_->SetAllocationInfo(start, limit);

  return true;
}
3052 :
// Unlinks and invalidates all free-list categories of |page|, returning
// the number of bytes evicted from the free list.
size_t FreeList::EvictFreeListItems(Page* page) {
  size_t sum = 0;
  page->ForAllFreeListCategories(
      [this, &sum](FreeListCategory* category) {
        DCHECK_EQ(this, category->owner());
        sum += category->available();
        RemoveCategory(category);
        category->Invalidate();
      });
  return sum;
}
3064 :
// Returns true if any category of |page| is currently linked into this
// free list.
bool FreeList::ContainsPageFreeListItems(Page* page) {
  bool contained = false;
  page->ForAllFreeListCategories(
      [this, &contained](FreeListCategory* category) {
        if (category->owner() == this && category->is_linked()) {
          contained = true;
        }
      });
  return contained;
}
3075 :
// Restores free-space maps in every linked category after deserialization.
void FreeList::RepairLists(Heap* heap) {
  ForAllFreeListCategories(
      [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
}
3080 :
// Links |category| at the head of the list for its type. Returns false if
// the category is empty or is already the head.
bool FreeList::AddCategory(FreeListCategory* category) {
  FreeListCategoryType type = category->type_;
  FreeListCategory* top = categories_[type];

  if (category->is_empty()) return false;
  if (top == category) return false;

  // Common double-linked list insertion.
  if (top != nullptr) {
    top->set_prev(category);
  }
  category->set_next(top);
  categories_[type] = category;
  return true;
}
3096 :
// Unlinks |category| from the doubly-linked list of its type; safe to call
// on a category that is not currently linked.
void FreeList::RemoveCategory(FreeListCategory* category) {
  FreeListCategoryType type = category->type_;
  FreeListCategory* top = categories_[type];

  // Common double-linked list removal.
  if (top == category) {
    categories_[type] = category->next();
  }
  if (category->prev() != nullptr) {
    category->prev()->set_next(category->next());
  }
  if (category->next() != nullptr) {
    category->next()->set_prev(category->prev());
  }
  category->set_next(nullptr);
  category->set_prev(nullptr);
}
3114 :
// Debug helper: prints the chain of categories currently linked for
// |type|.
void FreeList::PrintCategories(FreeListCategoryType type) {
  FreeListCategoryIterator it(this, type);
  PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
         static_cast<void*>(categories_[type]), type);
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    PrintF("%p -> ", static_cast<void*>(current));
  }
  PrintF("null\n");
}
3125 :
3126 :
#ifdef DEBUG
// Sums the sizes of all nodes on this category's list. Linear in the list
// length; used only for consistency checks.
size_t FreeListCategory::SumFreeList() {
  size_t sum = 0;
  FreeSpace* cur = top();
  while (cur != nullptr) {
    DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
    sum += cur->relaxed_read_size();
    cur = cur->next();
  }
  return sum;
}

// Counts the nodes on this category's list, capped at kVeryLongFreeList.
int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeSpace* cur = top();
  while (cur != nullptr) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}

// Returns true once the combined length of all category lists reaches
// FreeListCategory::kVeryLongFreeList; in that case SumFreeLists() is too
// expensive to be used in checks.
bool FreeList::IsVeryLong() {
  int len = 0;
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
    while (it.HasNext()) {
      len += it.Next()->FreeListLength();
      if (len >= FreeListCategory::kVeryLongFreeList) return true;
    }
  }
  return false;
}


// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
size_t FreeList::SumFreeLists() {
  size_t sum = 0;
  ForAllFreeListCategories(
      [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
  return sum;
}
#endif
3173 :
3174 :
3175 : // -----------------------------------------------------------------------------
3176 : // OldSpace implementation
3177 :
// Gets the space ready for a full mark-compact GC.
void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping. It will be restored
  // on the first allocation after the sweep.
  EmptyAllocationInfo();

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}
3186 :
// Size of this space minus the unused remainder of the linear allocation
// area, i.e. the bytes actually occupied by objects.
size_t PagedSpace::SizeOfObjects() {
  CHECK_GE(limit(), top());
  DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
  return Size() - (limit() - top());
}
3192 :
3193 : // After we have booted, we have created a map which represents free space
3194 : // on the heap. If there was already a free list then the elements on it
3195 : // were created with the wrong FreeSpaceMap (normally nullptr), so we need to
3196 : // fix them.
// Restores free-space maps in the free list and re-creates fillers for the
// untracked (wasted) tail of each page after deserialization.
void PagedSpace::RepairFreeListsAfterDeserialization() {
  free_list_.RepairLists(heap());
  // Each page may have a small free space that is not tracked by a free list.
  // Those free spaces still contain null as their map pointer.
  // Overwrite them with new fillers.
  for (Page* page : *this) {
    int size = static_cast<int>(page->wasted_memory());
    if (size == 0) {
      // If there is no wasted memory then all free space is in the free list.
      continue;
    }
    Address start = page->HighWaterMark();
    Address end = page->area_end();
    CHECK_EQ(size, static_cast<int>(end - start));
    heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
  }
}
3214 :
// If sweeping is still in progress, finishes it and retries the free-list
// allocation once; otherwise reports failure.
bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    // Wait for the sweeper threads here and complete the sweeping phase.
    collector->EnsureSweepingCompleted();

    // After waiting for the sweeper threads, there may be new free-list
    // entries.
    return free_list_.Allocate(size_in_bytes);
  }
  return false;
}
3227 :
// Compaction-space variant: sweeps some of this space in parallel, refills
// the local free list, and retries the allocation.
bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
    collector->sweeper().ParallelSweepSpace(identity(), 0);
    RefillFreeList();
    return free_list_.Allocate(size_in_bytes);
  }
  return false;
}
3237 :
// Slow-path allocation entry point: records GC VM state and runtime-call
// timing, then delegates to RawSlowAllocateRaw.
bool PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  VMState<GC> state(heap()->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap()->isolate(), &RuntimeCallStats::GC_Custom_SlowAllocateRaw);
  return RawSlowAllocateRaw(size_in_bytes);
}
3244 :
// Compaction-space slow path: same core logic, but without the VM-state and
// runtime-call-stats scopes used by PagedSpace::SlowAllocateRaw above.
bool CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
  return RawSlowAllocateRaw(size_in_bytes);
}
3248 :
// Common slow-path allocation. Tries, in order:
//   1. While sweeping is in progress: refill the free list from already-swept
//      pages, sweep the locked page (if any), then contribute by sweeping up
//      to kMaxPagesToSweep more pages.
//   2. If sweeping is done and this is a local (compaction) space: steal a
//      page from the corresponding main space.
//   3. Expand the space with a new page.
//   4. As a last resort, SweepAndRetryAllocation().
// Returns true if a free-list allocation of |size_in_bytes| succeeded.
bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.
  DCHECK_GE(size_in_bytes, 0);
  const int kMaxPagesToSweep = 1;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // If no sweeper tasks are running anymore (but sweeping is not yet
    // marked complete), finish sweeping synchronously on the main thread.
    if (FLAG_concurrent_sweeping && !is_local() &&
        !collector->sweeper().AreSweeperTasksRunning()) {
      collector->EnsureSweepingCompleted();
    }

    // First try to refill the free-list, concurrent sweeper threads
    // may have freed some objects in the meantime.
    RefillFreeList();

    // Retry the free list allocation.
    if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;

    // A page this space holds locked may still contain sweepable memory;
    // sweep it now and retry.
    if (locked_page_ != nullptr) {
      DCHECK_EQ(locked_page_->owner()->identity(), identity());
      collector->sweeper().ParallelSweepPage(locked_page_, identity());
      locked_page_ = nullptr;
      if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
    }

    // If sweeping is still in progress try to sweep pages.
    int max_freed = collector->sweeper().ParallelSweepSpace(
        identity(), size_in_bytes, kMaxPagesToSweep);
    RefillFreeList();
    if (max_freed >= size_in_bytes) {
      if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
    }
  } else if (is_local()) {
    // Sweeping not in progress and we are on a {CompactionSpace}. This can
    // only happen when we are evacuating for the young generation.
    PagedSpace* main_space = heap()->paged_space(identity());
    Page* page = main_space->RemovePageSafe(size_in_bytes);
    if (page != nullptr) {
      AddPage(page);
      if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
    }
  }

  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
    DCHECK((CountTotalPages() > 1) ||
           (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
  }

  // If sweeper threads are active, wait for them at that point and steal
  // elements from their free-lists. Allocation may still fail here, which
  // would indicate that there is not enough memory for the given allocation.
  return SweepAndRetryAllocation(size_in_bytes);
}
3305 :
#ifdef DEBUG
// Debug-only: prints capacity/waste/availability for this space plus an
// object-type histogram. Completes sweeping first so iteration is safe.
void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF(" capacity: %" PRIuS ", waste: %" PRIuS
         ", available: %" PRIuS ", %%%d\n",
         Capacity(), Waste(), Available(), pct);

  heap()->mark_compact_collector()->EnsureSweepingCompleted();
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
#endif
3321 :
3322 :
3323 : // -----------------------------------------------------------------------------
3324 : // MapSpace implementation
3325 :
#ifdef VERIFY_HEAP
// The map space only ever holds Map objects.
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif
3329 :
3330 4020 : Address LargePage::GetAddressToShrink(Address object_address,
3331 : size_t object_size) {
3332 4020 : if (executable() == EXECUTABLE) {
3333 : return 0;
3334 : }
3335 4015 : size_t used_size = ::RoundUp((object_address - address()) + object_size,
3336 4015 : MemoryAllocator::GetCommitPageSize());
3337 4015 : if (used_size < CommittedPhysicalMemory()) {
3338 44 : return address() + used_size;
3339 : }
3340 : return 0;
3341 : }
3342 :
// Drops all recorded slots — untyped and typed, for both OLD_TO_NEW and
// OLD_TO_OLD remembered sets — in [free_start, area_end()), the range that is
// about to be released when this page is shrunk.
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
  RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}
3351 :
3352 : // -----------------------------------------------------------------------------
3353 : // LargeObjectIterator
3354 :
3355 68805 : LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
3356 68805 : current_ = space->first_page_;
3357 0 : }
3358 :
3359 :
3360 72994 : HeapObject* LargeObjectIterator::Next() {
3361 72994 : if (current_ == nullptr) return nullptr;
3362 :
3363 : HeapObject* object = current_->GetObject();
3364 4189 : current_ = current_->next_page();
3365 4189 : return object;
3366 : }
3367 :
3368 :
3369 : // -----------------------------------------------------------------------------
3370 : // LargeObjectSpace
3371 :
// A large-object space starts empty. NOT_EXECUTABLE here is only the space
// default; executability is decided per allocation in AllocateRaw().
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_page_(nullptr),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(1024) {}  // 1024: presumably initial chunk-map capacity —
                           // confirm against the member declaration.
3379 :
// Intentionally empty; page cleanup is done explicitly via TearDown().
LargeObjectSpace::~LargeObjectSpace() {}
3381 :
// Nothing to initialize; always succeeds.
bool LargeObjectSpace::SetUp() {
  return true;
}
3385 :
3386 53365 : void LargeObjectSpace::TearDown() {
3387 119380 : while (first_page_ != nullptr) {
3388 : LargePage* page = first_page_;
3389 12650 : first_page_ = first_page_->next_page();
3390 25300 : LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
3391 12650 : heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
3392 : }
3393 : SetUp();
3394 53365 : }
3395 :
3396 :
// Allocates a large object of |object_size| on its own dedicated page.
// Returns a retry result when the old generation must not grow further or
// when the underlying page allocation fails. On success the new page is
// linked at the head of the page list, accounted for, and registered in the
// chunk map; a filler is written over the object area so the heap stays
// iterable until the caller initializes the object.
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == nullptr) return AllocationResult::Retry(identity());
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));

  // Account for the new page and link it at the head of the page list.
  size_ += static_cast<int>(page->size());
  AccountCommitted(page->size());
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  InsertChunkMapEntries(page);

  HeapObject* object = page->GetObject();

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
  }

  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
      Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
  heap()->CreateFillerObjectAt(object->address(), object_size,
                               ClearRecordedSlots::kNo);
  // Under black allocation, newly allocated objects are immediately marked.
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  AllocationStep(object_size, object->address(), object_size);
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  return object;
}
3443 :
3444 :
// Returns the committed memory as an upper bound for physical memory use.
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  // On a platform that provides lazy committing of memory, we over-account
  // the actually committed memory. There is no easy way right now to support
  // precise accounting of committed memory in large object space.
  return CommittedMemory();
}
3451 :
3452 :
3453 : // GC support
3454 6 : Object* LargeObjectSpace::FindObject(Address a) {
3455 6 : LargePage* page = FindPage(a);
3456 6 : if (page != nullptr) {
3457 6 : return page->GetObject();
3458 : }
3459 : return Smi::kZero; // Signaling not found.
3460 : }
3461 :
// Thread-safe variant of FindPage(): serializes access to the chunk map,
// which may be queried concurrently.
LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
  base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
  return FindPage(a);
}
3466 :
3467 35884217 : LargePage* LargeObjectSpace::FindPage(Address a) {
3468 : const Address key = MemoryChunk::FromAddress(a)->address();
3469 71768438 : auto it = chunk_map_.find(reinterpret_cast<Address>(key));
3470 35884221 : if (it != chunk_map_.end()) {
3471 34412050 : LargePage* page = it->second;
3472 : DCHECK(LargePage::IsValid(page));
3473 34412050 : if (page->Contains(a)) {
3474 34412050 : return page;
3475 : }
3476 : }
3477 : return nullptr;
3478 : }
3479 :
3480 :
// Resets the mark bits of all live (black or grey) large objects back to
// white and clears the per-chunk live-byte counts and progress bars, in
// preparation for the next marking cycle.
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    if (marking_state->IsBlackOrGrey(obj)) {
      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
      chunk->ResetProgressBar();
      marking_state->SetLiveBytes(chunk, 0);
    }
    // Every object must be white after this pass.
    DCHECK(marking_state->IsWhite(obj));
  }
}
3496 :
3497 17508 : void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
3498 : // There may be concurrent access on the chunk map. We have to take the lock
3499 : // here.
3500 17508 : base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
3501 285652 : for (Address current = reinterpret_cast<Address>(page);
3502 142826 : current < reinterpret_cast<Address>(page) + page->size();
3503 125318 : current += MemoryChunk::kPageSize) {
3504 125318 : chunk_map_[current] = page;
3505 : }
3506 17508 : }
3507 :
// Removes all chunk-map entries for |page|, starting at its base address.
void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
  RemoveChunkMapEntries(page, page->address());
}
3511 :
3512 4754 : void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
3513 : Address free_start) {
3514 118508 : for (Address current = reinterpret_cast<Address>(::RoundUp(
3515 9508 : reinterpret_cast<uintptr_t>(free_start), MemoryChunk::kPageSize));
3516 59254 : current < reinterpret_cast<Address>(page) + page->size();
3517 54500 : current += MemoryChunk::kPageSize) {
3518 : chunk_map_.erase(current);
3519 : }
3520 4754 : }
3521 :
// Sweeps the large-object space: unmarked (white) objects are unlinked and
// their pages freed; marked (black) objects survive and, when possible, have
// their pages shrunk down to the committed size actually in use. Also
// recomputes objects_size_ from the survivors.
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = nullptr;
  LargePage* current = first_page_;
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  objects_size_ = 0;
  while (current != nullptr) {
    HeapObject* object = current->GetObject();
    DCHECK(!marking_state->IsGrey(object));
    if (marking_state->IsBlack(object)) {
      // Live object: keep the page, but try to give back its unused tail.
      Address free_start;
      size_t size = static_cast<size_t>(object->Size());
      objects_size_ += size;
      if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
          0) {
        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
        // Drop recorded slots and chunk-map entries for the released tail
        // before the memory goes away.
        current->ClearOutOfLiveRangeSlots(free_start);
        RemoveChunkMapEntries(current, free_start);
        const size_t bytes_to_free =
            current->size() - (free_start - current->address());
        heap()->memory_allocator()->PartialFreeMemory(
            current, free_start, bytes_to_free,
            current->area_start() + object->Size());
        size_ -= bytes_to_free;
        AccountUncommitted(bytes_to_free);
      }
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == nullptr) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      size_ -= static_cast<int>(page->size());
      AccountUncommitted(page->size());
      page_count_--;

      RemoveChunkMapEntries(page);
      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    }
  }
}
3570 :
3571 :
3572 6606546 : bool LargeObjectSpace::Contains(HeapObject* object) {
3573 6606546 : Address address = object->address();
3574 : MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3575 :
3576 6606546 : bool owned = (chunk->owner() == this);
3577 :
3578 : SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3579 :
3580 6606546 : return owned;
3581 : }
3582 :
// Returns an owning iterator over all objects in this space.
std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
  return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
}
3586 :
3587 : #ifdef VERIFY_HEAP
3588 : // We do not assume that the large object iterator works, because it depends
3589 : // on the invariants we are checking during verification.
3590 : void LargeObjectSpace::Verify() {
3591 : for (LargePage* chunk = first_page_; chunk != nullptr;
3592 : chunk = chunk->next_page()) {
3593 : // Each chunk contains an object that starts at the large object page's
3594 : // object area start.
3595 : HeapObject* object = chunk->GetObject();
3596 : Page* page = Page::FromAddress(object->address());
3597 : CHECK(object->address() == page->area_start());
3598 :
3599 : // The first word should be a map, and we expect all map pointers to be
3600 : // in map space.
3601 : Map* map = object->map();
3602 : CHECK(map->IsMap());
3603 : CHECK(heap()->map_space()->Contains(map));
3604 :
3605 : // We have only code, sequential strings, external strings (sequential
3606 : // strings that have been morphed into external strings), thin strings
3607 : // (sequential strings that have been morphed into thin strings), fixed
3608 : // arrays, fixed double arrays, byte arrays, feedback vectors and free space
3609 : // (right after allocation) in the large object space.
3610 : CHECK(object->IsAbstractCode() || object->IsSeqString() ||
3611 : object->IsExternalString() || object->IsThinString() ||
3612 : object->IsFixedArray() || object->IsFixedDoubleArray() ||
3613 : object->IsPropertyArray() || object->IsByteArray() ||
3614 : object->IsFeedbackVector() || object->IsFreeSpace());
3615 :
3616 : // The object itself should look OK.
3617 : object->ObjectVerify();
3618 :
3619 : if (!FLAG_verify_heap_skip_remembered_set) {
3620 : heap()->VerifyRememberedSetFor(object);
3621 : }
3622 :
3623 : // Byte arrays and strings don't have interior pointers.
3624 : if (object->IsAbstractCode()) {
3625 : VerifyPointersVisitor code_visitor;
3626 : object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3627 : } else if (object->IsFixedArray()) {
3628 : FixedArray* array = FixedArray::cast(object);
3629 : for (int j = 0; j < array->length(); j++) {
3630 : Object* element = array->get(j);
3631 : if (element->IsHeapObject()) {
3632 : HeapObject* element_object = HeapObject::cast(element);
3633 : CHECK(heap()->Contains(element_object));
3634 : CHECK(element_object->map()->IsMap());
3635 : }
3636 : }
3637 : } else if (object->IsPropertyArray()) {
3638 : PropertyArray* array = PropertyArray::cast(object);
3639 : for (int j = 0; j < array->length(); j++) {
3640 : Object* property = array->get(j);
3641 : if (property->IsHeapObject()) {
3642 : HeapObject* property_object = HeapObject::cast(property);
3643 : CHECK(heap()->Contains(property_object));
3644 : CHECK(property_object->map()->IsMap());
3645 : }
3646 : }
3647 : }
3648 : }
3649 : }
3650 : #endif
3651 :
3652 : #ifdef DEBUG
// Debug-only: prints every object in this space to stdout.
void LargeObjectSpace::Print() {
  OFStream os(stdout);
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    obj->Print(os);
  }
}
3660 :
3661 :
// Debug-only: prints the total space size, object count, cumulative object
// size, and an object-type histogram.
void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %" PRIuS "\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(
      "  number of objects %d, "
      "size of objects %" PRIuS "\n",
      num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}
3678 :
3679 :
// Debug-only: best-effort dump of the objects on this page, flagging marked
// objects with '!' and reporting the marked size against the recorded
// live-byte count.
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this);
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != nullptr;
       object = objects.Next()) {
    bool is_marked =
        heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += object->Size();
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
         heap()->incremental_marking()->marking_state()->live_bytes(this));
}
3702 :
3703 : #endif // DEBUG
3704 : } // namespace internal
3705 : } // namespace v8
|