LCOV - code coverage report
Current view: top level - src/heap - spaces.cc (source / functions)
Test: app.info
Date: 2017-04-26

                  Hit    Total   Coverage
Lines:           1027     1162     88.4 %
Functions:        169      204     82.8 %

          Line data    Source code
       1             : // Copyright 2011 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/spaces.h"
       6             : 
       7             : #include <utility>
       8             : 
       9             : #include "src/base/bits.h"
      10             : #include "src/base/platform/platform.h"
      11             : #include "src/base/platform/semaphore.h"
      12             : #include "src/counters.h"
      13             : #include "src/full-codegen/full-codegen.h"
      14             : #include "src/heap/array-buffer-tracker.h"
      15             : #include "src/heap/incremental-marking.h"
      16             : #include "src/heap/mark-compact.h"
      17             : #include "src/heap/slot-set.h"
      18             : #include "src/macro-assembler.h"
      19             : #include "src/msan.h"
      20             : #include "src/objects-inl.h"
      21             : #include "src/snapshot/snapshot.h"
      22             : #include "src/v8.h"
      23             : 
      24             : namespace v8 {
      25             : namespace internal {
      26             : 
      27             : 
      28             : // ----------------------------------------------------------------------------
      29             : // HeapObjectIterator
      30             : 
      31       73710 : HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
      32             :     : cur_addr_(nullptr),
      33             :       cur_end_(nullptr),
      34             :       space_(space),
      35             :       page_range_(space->anchor()->next_page(), space->anchor()),
      36      147420 :       current_page_(page_range_.begin()) {}
      37             : 
      38           0 : HeapObjectIterator::HeapObjectIterator(Page* page)
      39             :     : cur_addr_(nullptr),
      40             :       cur_end_(nullptr),
      41           0 :       space_(reinterpret_cast<PagedSpace*>(page->owner())),
      42             :       page_range_(page),
      43           0 :       current_page_(page_range_.begin()) {
      44             : #ifdef DEBUG
      45             :   Space* owner = page->owner();
      46             :   DCHECK(owner == page->heap()->old_space() ||
      47             :          owner == page->heap()->map_space() ||
      48             :          owner == page->heap()->code_space());
      49             : #endif  // DEBUG
      50           0 : }
      51             : 
       52             : // We have hit the end of the current page's object area and should advance
       53             : // to the next page.
      54      301979 : bool HeapObjectIterator::AdvanceToNextPage() {
      55             :   DCHECK_EQ(cur_addr_, cur_end_);
      56      603958 :   if (current_page_ == page_range_.end()) return false;
      57      228270 :   Page* cur_page = *(current_page_++);
      58             :   space_->heap()
      59             :       ->mark_compact_collector()
      60      228270 :       ->sweeper()
      61      228270 :       .SweepOrWaitUntilSweepingCompleted(cur_page);
      62      228270 :   cur_addr_ = cur_page->area_start();
      63      228270 :   cur_end_ = cur_page->area_end();
      64             :   DCHECK(cur_page->SweepingDone());
      65      228270 :   return true;
      66             : }
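
A sketch of how the iterator is typically driven (the helper below is hypothetical; Next() is the iterator's public interface, used the same way in the DEBUG block of Page::ShrinkToHighWaterMark further down):

    void VisitAllObjects(PagedSpace* space) {
      HeapObjectIterator it(space);
      // Next() hands out objects from the current page and calls
      // AdvanceToNextPage() once cur_addr_ reaches cur_end_.
      for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
        // ... inspect obj ...
      }
    }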
      67             : 
      68      122556 : PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
      69      122556 :     : heap_(heap) {
      70             :   AllSpaces spaces(heap_);
      71      735336 :   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
      72      612780 :     space->PauseAllocationObservers();
      73             :   }
      74      122556 : }
      75             : 
      76      122556 : PauseAllocationObserversScope::~PauseAllocationObserversScope() {
      77      122556 :   AllSpaces spaces(heap_);
      78      735336 :   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
      79      612780 :     space->ResumeAllocationObservers();
      80             :   }
      81      122556 : }
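
The scope is a plain RAII guard; a hypothetical call site, for illustration:

    void AllocateQuietly(Heap* heap) {
      PauseAllocationObserversScope pause(heap);  // pauses every space
      // ... allocations here do not notify allocation observers ...
    }  // destructor resumes observers on all spaces, even on early return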
      82             : 
      83             : // -----------------------------------------------------------------------------
      84             : // CodeRange
      85             : 
      86             : 
      87        1414 : CodeRange::CodeRange(Isolate* isolate)
      88             :     : isolate_(isolate),
      89             :       code_range_(NULL),
      90             :       free_list_(0),
      91             :       allocation_list_(0),
      92      130048 :       current_allocation_block_index_(0) {}
      93             : 
      94             : 
      95       64324 : bool CodeRange::SetUp(size_t requested) {
      96             :   DCHECK(code_range_ == NULL);
      97             : 
      98       64324 :   if (requested == 0) {
      99             :     // When a target requires the code range feature, we put all code objects
     100             :     // in a kMaximalCodeRangeSize range of virtual address space, so that
     101             :     // they can call each other with near calls.
     102             :     if (kRequiresCodeRange) {
     103             :       requested = kMaximalCodeRangeSize;
     104             :     } else {
     105             :       return true;
     106             :     }
     107             :   }
     108             : 
     109       64324 :   if (requested <= kMinimumCodeRangeSize) {
     110             :     requested = kMinimumCodeRangeSize;
     111             :   }
     112             : 
     113             :   const size_t reserved_area =
     114             :       kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
     115             :   if (requested < (kMaximalCodeRangeSize - reserved_area))
     116             :     requested += reserved_area;
     117             : 
     118             :   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
     119             : 
     120             :   code_range_ = new base::VirtualMemory(
     121             :       requested, Max(kCodeRangeAreaAlignment,
     122      128648 :                      static_cast<size_t>(base::OS::AllocateAlignment())));
     123      192996 :   CHECK(code_range_ != NULL);
     124       64324 :   if (!code_range_->IsReserved()) {
     125           0 :     delete code_range_;
     126           0 :     code_range_ = NULL;
     127           0 :     return false;
     128             :   }
     129             : 
      130             :   // We are sure that we have mapped a block of the requested size.
     131             :   DCHECK(code_range_->size() == requested);
     132       64324 :   Address base = reinterpret_cast<Address>(code_range_->address());
     133             : 
     134             :   // On some platforms, specifically Win64, we need to reserve some pages at
     135             :   // the beginning of an executable space.
     136             :   if (reserved_area > 0) {
     137             :     if (!code_range_->Commit(base, reserved_area, true)) {
     138             :       delete code_range_;
     139             :       code_range_ = NULL;
     140             :       return false;
     141             :     }
     142             :     base += reserved_area;
     143             :   }
     144             :   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
     145       64324 :   size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
     146       64324 :   allocation_list_.Add(FreeBlock(aligned_base, size));
     147       64324 :   current_allocation_block_index_ = 0;
     148             : 
     149       64348 :   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
     150             :   return true;
     151             : }
     152             : 
     153             : 
     154        3626 : int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
     155             :                                        const FreeBlock* right) {
     156             :   // The entire point of CodeRange is that the difference between two
     157             :   // addresses in the range can be represented as a signed 32-bit int,
     158             :   // so the cast is semantically correct.
     159        3626 :   return static_cast<int>(left->start - right->start);
     160             : }
     161             : 
     162             : 
     163          98 : bool CodeRange::GetNextAllocationBlock(size_t requested) {
     164         210 :   for (current_allocation_block_index_++;
     165         105 :        current_allocation_block_index_ < allocation_list_.length();
     166             :        current_allocation_block_index_++) {
     167         308 :     if (requested <= allocation_list_[current_allocation_block_index_].size) {
     168             :       return true;  // Found a large enough allocation block.
     169             :     }
     170             :   }
     171             : 
     172             :   // Sort and merge the free blocks on the free list and the allocation list.
     173        1057 :   free_list_.AddAll(allocation_list_);
     174             :   allocation_list_.Clear();
     175             :   free_list_.Sort(&CompareFreeBlockAddress);
     176         217 :   for (int i = 0; i < free_list_.length();) {
     177         133 :     FreeBlock merged = free_list_[i];
     178         133 :     i++;
     179             :     // Add adjacent free blocks to the current merged block.
     180        1771 :     while (i < free_list_.length() &&
     181         798 :            free_list_[i].start == merged.start + merged.size) {
     182         707 :       merged.size += free_list_[i].size;
     183         707 :       i++;
     184             :     }
     185         133 :     if (merged.size > 0) {
     186         105 :       allocation_list_.Add(merged);
     187             :     }
     188             :   }
     189             :   free_list_.Clear();
     190             : 
     191          84 :   for (current_allocation_block_index_ = 0;
     192          42 :        current_allocation_block_index_ < allocation_list_.length();
     193             :        current_allocation_block_index_++) {
     194          35 :     if (requested <= allocation_list_[current_allocation_block_index_].size) {
     195             :       return true;  // Found a large enough allocation block.
     196             :     }
     197             :   }
     198           7 :   current_allocation_block_index_ = 0;
     199             :   // Code range is full or too fragmented.
     200           7 :   return false;
     201             : }
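
The sort-and-merge step above, restated as a self-contained sketch over std::vector rather than V8's List type:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Block {
      size_t start;
      size_t size;
    };

    // Sort by start address, then merge runs that touch end-to-start,
    // dropping empty blocks, as the compaction loop above does.
    std::vector<Block> Coalesce(std::vector<Block> blocks) {
      std::sort(blocks.begin(), blocks.end(),
                [](const Block& a, const Block& b) { return a.start < b.start; });
      std::vector<Block> merged;
      for (const Block& b : blocks) {
        if (!merged.empty() &&
            merged.back().start + merged.back().size == b.start) {
          merged.back().size += b.size;  // adjacent: extend the previous block
        } else if (b.size > 0) {
          merged.push_back(b);
        }
      }
      return merged;
    }
    // Coalesce({{0, 4}, {8, 4}, {4, 4}}) yields the single block {0, 12}.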
     202             : 
     203             : 
     204      405174 : Address CodeRange::AllocateRawMemory(const size_t requested_size,
     205             :                                      const size_t commit_size,
     206             :                                      size_t* allocated) {
      207             :   // requested_size includes the guard pages while commit_size does not.
      208             :   // Make sure callers know about the invariant.
     209      405174 :   CHECK_LE(commit_size,
     210             :            requested_size - 2 * MemoryAllocator::CodePageGuardSize());
     211             :   FreeBlock current;
     212      405174 :   if (!ReserveBlock(requested_size, &current)) {
     213           7 :     *allocated = 0;
     214           7 :     return NULL;
     215             :   }
     216      405168 :   *allocated = current.size;
     217             :   DCHECK(*allocated <= current.size);
     218             :   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
     219      405168 :   if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
     220      405168 :           code_range_, current.start, commit_size, *allocated)) {
     221           0 :     *allocated = 0;
     222           0 :     ReleaseBlock(&current);
     223           0 :     return NULL;
     224             :   }
     225      405168 :   return current.start;
     226             : }
     227             : 
     228             : 
     229           0 : bool CodeRange::CommitRawMemory(Address start, size_t length) {
     230             :   return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
     231        2467 :                                                             EXECUTABLE);
     232             : }
     233             : 
     234             : 
     235           0 : bool CodeRange::UncommitRawMemory(Address start, size_t length) {
     236         308 :   return code_range_->Uncommit(start, length);
     237             : }
     238             : 
     239             : 
     240      394804 : void CodeRange::FreeRawMemory(Address address, size_t length) {
     241             :   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
     242      394804 :   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
     243      394806 :   free_list_.Add(FreeBlock(address, length));
     244      394806 :   code_range_->Uncommit(address, length);
     245      394806 : }
     246             : 
     247             : 
     248       63527 : void CodeRange::TearDown() {
     249       63527 :   delete code_range_;  // Frees all memory in the virtual memory range.
     250       63527 :   code_range_ = NULL;
     251       63527 :   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
     252             :   free_list_.Free();
     253             :   allocation_list_.Free();
     254       63527 : }
     255             : 
     256             : 
     257      405175 : bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
     258      405175 :   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
     259             :   DCHECK(allocation_list_.length() == 0 ||
     260             :          current_allocation_block_index_ < allocation_list_.length());
     261     1620693 :   if (allocation_list_.length() == 0 ||
     262      810350 :       requested_size > allocation_list_[current_allocation_block_index_].size) {
     263             :     // Find an allocation block large enough.
     264          98 :     if (!GetNextAllocationBlock(requested_size)) return false;
     265             :   }
     266             :   // Commit the requested memory at the start of the current allocation block.
     267             :   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
     268      810336 :   *block = allocation_list_[current_allocation_block_index_];
     269             :   // Don't leave a small free block, useless for a large object or chunk.
     270      405168 :   if (aligned_requested < (block->size - Page::kPageSize)) {
     271      405105 :     block->size = aligned_requested;
     272             :   }
     273             :   DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
     274      405168 :   allocation_list_[current_allocation_block_index_].start += block->size;
     275      405168 :   allocation_list_[current_allocation_block_index_].size -= block->size;
     276      405168 :   return true;
     277             : }
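
The splitting rule in ReserveBlock, isolated with illustrative types; it assumes, as the caller guarantees, that the rounded request fits in the block:

    #include <cstddef>

    struct Block {
      size_t start;
      size_t size;
    };

    // Take the rounded request from the front of a free block, but swallow
    // the whole block when the tail would be one page or less, since such
    // a remainder is useless for a large object or chunk.
    Block TakeFromFront(Block* free_block, size_t aligned_request,
                        size_t page_size) {
      Block taken = *free_block;
      if (aligned_request < taken.size - page_size) {
        taken.size = aligned_request;  // leave a usable remainder behind
      }
      free_block->start += taken.size;
      free_block->size -= taken.size;
      return taken;
    }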
     278             : 
     279             : 
     280           0 : void CodeRange::ReleaseBlock(const FreeBlock* block) {
     281           0 :   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
     282           0 :   free_list_.Add(*block);
     283           0 : }
     284             : 
     285             : 
     286             : // -----------------------------------------------------------------------------
     287             : // MemoryAllocator
     288             : //
     289             : 
     290       63610 : MemoryAllocator::MemoryAllocator(Isolate* isolate)
     291             :     : isolate_(isolate),
     292             :       code_range_(nullptr),
     293             :       capacity_(0),
     294             :       capacity_executable_(0),
     295             :       size_(0),
     296             :       size_executable_(0),
     297             :       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
     298             :       highest_ever_allocated_(reinterpret_cast<void*>(0)),
     299      127220 :       unmapper_(this) {}
     300             : 
     301       63610 : bool MemoryAllocator::SetUp(size_t capacity, size_t capacity_executable,
     302             :                             size_t code_range_size) {
     303       63610 :   capacity_ = RoundUp(capacity, Page::kPageSize);
     304       63610 :   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
     305             :   DCHECK_GE(capacity_, capacity_executable_);
     306             : 
     307             :   size_ = 0;
     308             :   size_executable_ = 0;
     309             : 
     310      127220 :   code_range_ = new CodeRange(isolate_);
     311       63610 :   if (!code_range_->SetUp(code_range_size)) return false;
     312             : 
     313       63610 :   return true;
     314             : }
     315             : 
     316             : 
     317       62113 : void MemoryAllocator::TearDown() {
     318       62113 :   unmapper()->TearDown();
     319             : 
     320             :   // Check that spaces were torn down before MemoryAllocator.
     321             :   DCHECK_EQ(size_.Value(), 0u);
     322             :   // TODO(gc) this will be true again when we fix FreeMemory.
     323             :   // DCHECK(size_executable_ == 0);
     324       62113 :   capacity_ = 0;
     325       62113 :   capacity_executable_ = 0;
     326             : 
     327       62113 :   if (last_chunk_.IsReserved()) {
     328           0 :     last_chunk_.Release();
     329             :   }
     330             : 
     331       62113 :   delete code_range_;
     332       62113 :   code_range_ = nullptr;
     333       62113 : }
     334             : 
     335      411040 : class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
     336             :  public:
     337      205551 :   explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
     338             : 
     339             :  private:
     340             :   // v8::Task overrides.
     341      205498 :   void Run() override {
     342      205498 :     unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     343      205537 :     unmapper_->pending_unmapping_tasks_semaphore_.Signal();
     344      205540 :   }
     345             : 
     346             :   Unmapper* unmapper_;
     347             :   DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
     348             : };
     349             : 
     350      206362 : void MemoryAllocator::Unmapper::FreeQueuedChunks() {
     351      206362 :   ReconsiderDelayedChunks();
     352      206362 :   if (FLAG_concurrent_sweeping) {
     353      205551 :     V8::GetCurrentPlatform()->CallOnBackgroundThread(
     354      411102 :         new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
     355      205551 :     concurrent_unmapping_tasks_active_++;
     356             :   } else {
     357         811 :     PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     358             :   }
     359      206362 : }
     360             : 
     361       53352 : bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
     362             :   bool waited = false;
     363      373180 :   while (concurrent_unmapping_tasks_active_ > 0) {
     364      204361 :     pending_unmapping_tasks_semaphore_.Wait();
     365      204361 :     concurrent_unmapping_tasks_active_--;
     366             :     waited = true;
     367             :   }
     368       53352 :   return waited;
     369             : }
     370             : 
     371             : template <MemoryAllocator::Unmapper::FreeMode mode>
     372      268401 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     373             :   MemoryChunk* chunk = nullptr;
     374             :   // Regular chunks.
     375      732324 :   while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
     376             :     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
     377      195515 :     allocator_->PerformFreeMemory(chunk);
     378      195517 :     if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
     379             :   }
     380             :   if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
     381             :     // The previous loop uncommitted any pages marked as pooled and added them
      382             :     // to the pooled list. In the kReleasePooled case we additionally need
      383             :     // to free them.
     384      229591 :     while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
     385      167476 :       allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
     386             :     }
     387             :   }
     388             :   // Non-regular chunks.
     389      276348 :   while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
     390        7872 :     allocator_->PerformFreeMemory(chunk);
     391             :   }
     392      268461 : }
     393             : 
     394       62115 : void MemoryAllocator::Unmapper::TearDown() {
     395             :   WaitUntilCompleted();
     396       62115 :   ReconsiderDelayedChunks();
     397       62115 :   CHECK(delayed_regular_chunks_.empty());
     398       62115 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     399             :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     400             :     DCHECK(chunks_[i].empty());
     401             :   }
     402       62115 : }
     403             : 
     404      268477 : void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
     405             :   std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
     406             :   // Move constructed, so the permanent list should be empty.
     407             :   DCHECK(delayed_regular_chunks_.empty());
     408      615170 :   for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
     409       78216 :     AddMemoryChunkSafe<kRegular>(*it);
     410             :   }
     411      268477 : }
     412             : 
     413           0 : bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
     414      274515 :   MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
     415             :   // We cannot free a memory chunk in new space while the sweeper is running
     416             :   // because the memory chunk can be in the queue of a sweeper task.
     417             :   // Chunks in old generation are unmapped if they are empty.
     418             :   DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
     419      388977 :   return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
     420      114462 :          !mc->sweeper().sweeping_in_progress();
     421             : }
     422             : 
     423       19602 : bool MemoryAllocator::CommitMemory(Address base, size_t size,
     424             :                                    Executability executable) {
     425       19602 :   if (!base::VirtualMemory::CommitRegion(base, size,
     426       19602 :                                          executable == EXECUTABLE)) {
     427             :     return false;
     428             :   }
     429       19602 :   UpdateAllocatedSpaceLimits(base, base + size);
     430       19602 :   return true;
     431             : }
     432             : 
     433             : 
     434           0 : void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
     435             :                                  Executability executable) {
     436             :   // TODO(gc) make code_range part of memory allocator?
     437             :   // Code which is part of the code-range does not have its own VirtualMemory.
     438             :   DCHECK(code_range() == NULL ||
     439             :          !code_range()->contains(static_cast<Address>(reservation->address())));
     440             :   DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
     441             :          reservation->size() <= Page::kPageSize);
     442             : 
     443      300525 :   reservation->Release();
     444           0 : }
     445             : 
     446             : 
     447      561421 : void MemoryAllocator::FreeMemory(Address base, size_t size,
     448      561421 :                                  Executability executable) {
     449             :   // TODO(gc) make code_range part of memory allocator?
     450     1122841 :   if (code_range() != NULL &&
     451             :       code_range()->contains(static_cast<Address>(base))) {
     452             :     DCHECK(executable == EXECUTABLE);
     453      393945 :     code_range()->FreeRawMemory(base, size);
     454             :   } else {
     455             :     DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
     456      167476 :     bool result = base::VirtualMemory::ReleaseRegion(base, size);
     457             :     USE(result);
     458             :     DCHECK(result);
     459             :   }
     460      561421 : }
     461             : 
     462      480087 : Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
     463             :                                               base::VirtualMemory* controller) {
     464      480087 :   base::VirtualMemory reservation(size, alignment);
     465             : 
     466      480089 :   if (!reservation.IsReserved()) return NULL;
     467      480089 :   size_.Increment(reservation.size());
     468             :   Address base =
     469      480089 :       RoundUp(static_cast<Address>(reservation.address()), alignment);
     470             :   controller->TakeControl(&reservation);
     471      480089 :   return base;
     472             : }
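
RoundUp aligns the reserved base to the requested alignment; a freestanding sketch of the usual power-of-two formula (not V8's actual template):

    #include <cstdint>

    // Bump an address to the next multiple of a power-of-two alignment.
    inline uintptr_t RoundUpAddr(uintptr_t addr, uintptr_t alignment) {
      return (addr + alignment - 1) & ~(alignment - 1);
    }
    // RoundUpAddr(0x12345, 0x1000) == 0x13000; aligned inputs come back
    // unchanged.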
     473             : 
     474      480087 : Address MemoryAllocator::AllocateAlignedMemory(
     475             :     size_t reserve_size, size_t commit_size, size_t alignment,
     476             :     Executability executable, base::VirtualMemory* controller) {
     477             :   DCHECK(commit_size <= reserve_size);
     478      480087 :   base::VirtualMemory reservation;
     479      480087 :   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
     480      480089 :   if (base == NULL) return NULL;
     481             : 
     482      480089 :   if (executable == EXECUTABLE) {
     483         700 :     if (!CommitExecutableMemory(&reservation, base, commit_size,
     484         700 :                                 reserve_size)) {
     485             :       base = NULL;
     486             :     }
     487             :   } else {
     488      479389 :     if (reservation.Commit(base, commit_size, false)) {
     489      479389 :       UpdateAllocatedSpaceLimits(base, base + commit_size);
     490             :     } else {
     491             :       base = NULL;
     492             :     }
     493             :   }
     494             : 
     495      480089 :   if (base == NULL) {
     496             :     // Failed to commit the body. Release the mapping and any partially
      497             :     // committed regions inside it.
     498           0 :     reservation.Release();
     499             :     size_.Decrement(reserve_size);
     500           0 :     return NULL;
     501             :   }
     502             : 
     503             :   controller->TakeControl(&reservation);
     504      480089 :   return base;
     505             : }
     506             : 
     507      422459 : void Page::InitializeAsAnchor(Space* space) {
     508             :   set_owner(space);
     509      422459 :   set_next_chunk(this);
     510             :   set_prev_chunk(this);
     511             :   SetFlags(0, static_cast<uintptr_t>(~0));
     512             :   SetFlag(ANCHOR);
     513      422459 : }
     514             : 
     515      900355 : MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
     516             :                                      Address area_start, Address area_end,
     517             :                                      Executability executable, Space* owner,
     518             :                                      base::VirtualMemory* reservation) {
     519             :   MemoryChunk* chunk = FromAddress(base);
     520             : 
     521             :   DCHECK(base == chunk->address());
     522             : 
     523      900355 :   chunk->heap_ = heap;
     524      900355 :   chunk->size_ = size;
     525      900355 :   chunk->area_start_ = area_start;
     526      900355 :   chunk->area_end_ = area_end;
     527      900355 :   chunk->flags_ = Flags(NO_FLAGS);
     528             :   chunk->set_owner(owner);
     529             :   chunk->InitializeReservedMemory();
     530             :   chunk->slot_set_[OLD_TO_NEW].SetValue(nullptr);
     531             :   chunk->slot_set_[OLD_TO_OLD].SetValue(nullptr);
     532             :   chunk->typed_slot_set_[OLD_TO_NEW].SetValue(nullptr);
     533             :   chunk->typed_slot_set_[OLD_TO_OLD].SetValue(nullptr);
     534      900355 :   chunk->skip_list_ = nullptr;
     535      900355 :   chunk->progress_bar_ = 0;
     536      900355 :   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
     537             :   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
     538      900355 :   chunk->mutex_ = new base::RecursiveMutex();
     539             :   chunk->available_in_free_list_ = 0;
     540             :   chunk->wasted_memory_ = 0;
     541      900355 :   chunk->young_generation_bitmap_ = nullptr;
     542             :   chunk->set_next_chunk(nullptr);
     543             :   chunk->set_prev_chunk(nullptr);
     544      900355 :   chunk->local_tracker_ = nullptr;
     545             : 
     546             :   MarkingState::Internal(chunk).ClearLiveness();
     547             : 
     548             :   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
     549             : 
     550      900355 :   if (executable == EXECUTABLE) {
     551             :     chunk->SetFlag(IS_EXECUTABLE);
     552             :   }
     553             : 
     554      900355 :   if (reservation != nullptr) {
     555             :     chunk->reservation_.TakeControl(reservation);
     556             :   }
     557             : 
     558      900355 :   return chunk;
     559             : }
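
FromAddress(base) relies on every chunk starting at a MemoryChunk::kAlignment boundary; a sketch of the masking, assuming a power-of-two alignment as in V8:

    #include <cstdint>

    // Recover the chunk header from any interior address by clearing the
    // low alignment bits.
    inline uintptr_t ChunkBase(uintptr_t interior, uintptr_t alignment) {
      return interior & ~(alignment - 1);
    }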
     560             : 
     561             : 
     562             : // Commit MemoryChunk area to the requested size.
     563      353055 : bool MemoryChunk::CommitArea(size_t requested) {
     564             :   size_t guard_size =
     565       87975 :       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
     566       87975 :   size_t header_size = area_start() - address() - guard_size;
     567             :   size_t commit_size =
     568       87975 :       RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
     569      175950 :   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
     570       87975 :                                   MemoryAllocator::GetCommitPageSize());
     571             : 
     572       87975 :   if (commit_size > committed_size) {
     573             :     // Commit size should be less or equal than the reserved size.
     574             :     DCHECK(commit_size <= size() - 2 * guard_size);
     575             :     // Append the committed area.
     576        3622 :     Address start = address() + committed_size + guard_size;
     577        3622 :     size_t length = commit_size - committed_size;
     578        3622 :     if (reservation_.IsReserved()) {
     579             :       Executability executable =
     580        1155 :           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
     581        1155 :       if (!heap()->memory_allocator()->CommitMemory(start, length,
     582        1155 :                                                     executable)) {
     583             :         return false;
     584             :       }
     585             :     } else {
     586        2775 :       CodeRange* code_range = heap_->memory_allocator()->code_range();
     587             :       DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
     588        2467 :       if (!code_range->CommitRawMemory(start, length)) return false;
     589             :     }
     590             : 
     591             :     if (Heap::ShouldZapGarbage()) {
     592             :       heap_->memory_allocator()->ZapBlock(start, length);
     593             :     }
     594       84353 :   } else if (commit_size < committed_size) {
     595             :     DCHECK(commit_size > 0);
     596             :     // Shrink the committed area.
     597        1246 :     size_t length = committed_size - commit_size;
     598        1246 :     Address start = address() + committed_size + guard_size - length;
     599        1246 :     if (reservation_.IsReserved()) {
     600         938 :       if (!reservation_.Uncommit(start, length)) return false;
     601             :     } else {
     602         616 :       CodeRange* code_range = heap_->memory_allocator()->code_range();
     603             :       DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
     604         308 :       if (!code_range->UncommitRawMemory(start, length)) return false;
     605             :     }
     606             :   }
     607             : 
     608       87975 :   area_end_ = area_start_ + requested;
     609       87975 :   return true;
     610             : }
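
The grow/shrink decision above reduces to comparing page-rounded spans; a sketch with illustrative numbers:

    #include <cstddef>

    // Round a byte count up to whole commit pages.
    size_t PageRounded(size_t bytes, size_t page) {
      return ((bytes + page - 1) / page) * page;
    }
    // With header 0x2000 and page 0x1000: growing the area from 0x8000 to
    // 0xC000 moves the committed span from 0xA000 to 0xE000, so the 0x4000
    // bytes after the old committed end must be newly committed.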
     611             : 
     612        3009 : size_t MemoryChunk::CommittedPhysicalMemory() {
     613        1551 :   if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
     614        1458 :     return size();
     615          93 :   return high_water_mark_.Value();
     616             : }
     617             : 
     618      761478 : void MemoryChunk::InsertAfter(MemoryChunk* other) {
     619             :   MemoryChunk* other_next = other->next_chunk();
     620             : 
     621             :   set_next_chunk(other_next);
     622             :   set_prev_chunk(other);
     623             :   other_next->set_prev_chunk(this);
     624             :   other->set_next_chunk(this);
     625      761478 : }
     626             : 
     627             : 
     628       81167 : void MemoryChunk::Unlink() {
     629             :   MemoryChunk* next_element = next_chunk();
     630             :   MemoryChunk* prev_element = prev_chunk();
     631             :   next_element->set_prev_chunk(prev_element);
     632             :   prev_element->set_next_chunk(next_element);
     633             :   set_prev_chunk(NULL);
     634             :   set_next_chunk(NULL);
     635       81167 : }
     636             : 
     637      182103 : void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
     638             :   DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
     639             :   DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
     640      182103 :   Address free_start = chunk->area_end_ - bytes_to_shrink;
      641             :   // Don't adjust the size of the page. The area is just uncommitted but not
     642             :   // released.
     643      182103 :   chunk->area_end_ -= bytes_to_shrink;
     644      182103 :   UncommitBlock(free_start, bytes_to_shrink);
     645      182104 :   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
     646       60697 :     if (chunk->reservation_.IsReserved())
     647           0 :       chunk->reservation_.Guard(chunk->area_end_);
     648             :     else
     649       60697 :       base::OS::Guard(chunk->area_end_, GetCommitPageSize());
     650             :   }
     651      182104 : }
     652             : 
     653      884375 : MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     654             :                                             size_t commit_area_size,
     655             :                                             Executability executable,
     656      404985 :                                             Space* owner) {
     657             :   DCHECK_LE(commit_area_size, reserve_area_size);
     658             : 
     659             :   size_t chunk_size;
     660     2653125 :   Heap* heap = isolate_->heap();
     661             :   Address base = nullptr;
     662      884375 :   base::VirtualMemory reservation;
     663             :   Address area_start = nullptr;
     664             :   Address area_end = nullptr;
     665             : 
     666             :   //
     667             :   // MemoryChunk layout:
     668             :   //
     669             :   //             Executable
     670             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     671             :   // |           Header           |
     672             :   // +----------------------------+<- base + CodePageGuardStartOffset
     673             :   // |           Guard            |
     674             :   // +----------------------------+<- area_start_
     675             :   // |           Area             |
     676             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     677             :   // |   Committed but not used   |
     678             :   // +----------------------------+<- aligned at OS page boundary
     679             :   // | Reserved but not committed |
     680             :   // +----------------------------+<- aligned at OS page boundary
     681             :   // |           Guard            |
     682             :   // +----------------------------+<- base + chunk_size
     683             :   //
     684             :   //           Non-executable
     685             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     686             :   // |          Header            |
     687             :   // +----------------------------+<- area_start_ (base + kObjectStartOffset)
     688             :   // |           Area             |
     689             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     690             :   // |  Committed but not used    |
     691             :   // +----------------------------+<- aligned at OS page boundary
     692             :   // | Reserved but not committed |
     693             :   // +----------------------------+<- base + chunk_size
     694             :   //
     695             : 
     696      884374 :   if (executable == EXECUTABLE) {
     697      404986 :     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
     698      809972 :                          GetCommitPageSize()) +
     699      404986 :                  CodePageGuardSize();
     700             : 
     701             :     // Check executable memory limit.
     702      404986 :     if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
     703           0 :       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
     704             :                                 "V8 Executable Allocation capacity exceeded"));
     705             :       return NULL;
     706             :     }
     707             : 
     708             :     // Size of header (not executable) plus area (executable).
     709      404985 :     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
     710      404985 :                                  GetCommitPageSize());
     711             :     // Allocate executable memory either from code range or from the
     712             :     // OS.
     713             : #ifdef V8_TARGET_ARCH_MIPS64
     714             :     // Use code range only for large object space on mips64 to keep address
     715             :     // range within 256-MB memory region.
     716             :     if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
     717             : #else
     718      404985 :     if (code_range()->valid()) {
     719             : #endif
     720             :       base =
     721      404285 :           code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
     722             :       DCHECK(
     723             :           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
     724      404286 :       if (base == NULL) return NULL;
     725      404286 :       size_.Increment(chunk_size);
     726             :       // Update executable memory size.
     727      404286 :       size_executable_.Increment(chunk_size);
     728             :     } else {
     729             :       base = AllocateAlignedMemory(chunk_size, commit_size,
     730             :                                    MemoryChunk::kAlignment, executable,
     731         700 :                                    &reservation);
     732         700 :       if (base == NULL) return NULL;
     733             :       // Update executable memory size.
     734         700 :       size_executable_.Increment(reservation.size());
     735             :     }
     736             : 
     737             :     if (Heap::ShouldZapGarbage()) {
     738             :       ZapBlock(base, CodePageGuardStartOffset());
     739             :       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
     740             :     }
     741             : 
     742      404986 :     area_start = base + CodePageAreaStartOffset();
     743      404986 :     area_end = area_start + commit_area_size;
     744             :   } else {
     745             :     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
     746      958774 :                          GetCommitPageSize());
     747             :     size_t commit_size =
     748             :         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
     749      479388 :                 GetCommitPageSize());
     750             :     base =
     751             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     752      479388 :                               executable, &reservation);
     753             : 
     754      479389 :     if (base == NULL) return NULL;
     755             : 
     756             :     if (Heap::ShouldZapGarbage()) {
     757             :       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
     758             :     }
     759             : 
     760      479389 :     area_start = base + Page::kObjectStartOffset;
     761      479389 :     area_end = area_start + commit_area_size;
     762             :   }
     763             : 
     764             :   // Use chunk_size for statistics and callbacks because we assume that they
     765             :   // treat reserved but not-yet committed memory regions of chunks as allocated.
     766             :   isolate_->counters()->memory_allocated()->Increment(
     767     1768750 :       static_cast<int>(chunk_size));
     768             : 
     769     1768750 :   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
     770             : 
     771             :   // We cannot use the last chunk in the address space because we would
     772             :   // overflow when comparing top and limit if this chunk is used for a
     773             :   // linear allocation area.
     774      884375 :   if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
     775           0 :     CHECK(!last_chunk_.IsReserved());
     776             :     last_chunk_.TakeControl(&reservation);
     777             :     UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
     778           0 :                   last_chunk_.size());
     779           0 :     size_.Decrement(chunk_size);
     780           0 :     if (executable == EXECUTABLE) {
     781           0 :       size_executable_.Decrement(chunk_size);
     782             :     }
     783           0 :     CHECK(last_chunk_.IsReserved());
     784             :     return AllocateChunk(reserve_area_size, commit_area_size, executable,
     785           0 :                          owner);
     786             :   }
     787             : 
     788             :   return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
     789      884375 :                                  executable, owner, &reservation);
     790             : }
     791             : 
     792             : 
     793      495021 : void Page::ResetFreeListStatistics() {
     794             :   wasted_memory_ = 0;
     795             :   available_in_free_list_ = 0;
     796      495021 : }
     797             : 
     798           0 : size_t Page::AvailableInFreeList() {
     799           0 :   size_t sum = 0;
     800           0 :   ForAllFreeListCategories([&sum](FreeListCategory* category) {
     801           0 :     sum += category->available();
     802             :   });
     803           0 :   return sum;
     804             : }
     805             : 
     806      303543 : size_t Page::ShrinkToHighWaterMark() {
     807             :   // Shrink pages to high water mark. The water mark points either to a filler
     808             :   // or the area_end.
     809     1335460 :   HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
     810      303542 :   if (filler->address() == area_end()) return 0;
     811      303534 :   CHECK(filler->IsFiller());
     812      303533 :   if (!filler->IsFreeSpace()) return 0;
     813             : 
     814             : #ifdef DEBUG
      815             :   // Check that the filler is indeed the last filler on the page.
     816             :   HeapObjectIterator it(this);
     817             :   HeapObject* filler2 = nullptr;
     818             :   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
     819             :     filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
     820             :   }
     821             :   if (filler2 == nullptr || filler2->address() == area_end()) return 0;
     822             :   DCHECK(filler2->IsFiller());
     823             :   // The deserializer might leave behind fillers. In this case we need to
     824             :   // iterate even further.
     825             :   while ((filler2->address() + filler2->Size()) != area_end()) {
     826             :     filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
     827             :     DCHECK(filler2->IsFiller());
     828             :   }
     829             :   DCHECK_EQ(filler->address(), filler2->address());
     830             : #endif  // DEBUG
     831             : 
     832             :   size_t unused = RoundDown(
     833      303502 :       static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
     834             :       MemoryAllocator::GetCommitPageSize());
     835      303502 :   if (unused > 0) {
     836      182104 :     if (FLAG_trace_gc_verbose) {
     837             :       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
     838             :                    reinterpret_cast<void*>(this),
     839             :                    reinterpret_cast<void*>(area_end()),
     840           0 :                    reinterpret_cast<void*>(area_end() - unused));
     841             :     }
     842             :     heap()->CreateFillerObjectAt(
     843             :         filler->address(),
     844      182104 :         static_cast<int>(area_end() - filler->address() - unused),
     845      364208 :         ClearRecordedSlots::kNo);
     846      182103 :     heap()->memory_allocator()->ShrinkChunk(this, unused);
     847      182104 :     CHECK(filler->IsFiller());
     848      182104 :     CHECK_EQ(filler->address() + filler->Size(), area_end());
     849             :   }
     850      303502 :   return unused;
     851             : }
     852             : 
     853        4184 : void Page::CreateBlackArea(Address start, Address end) {
     854             :   DCHECK(heap()->incremental_marking()->black_allocation());
     855             :   DCHECK_EQ(Page::FromAddress(start), this);
     856             :   DCHECK_NE(start, end);
     857             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
     858             :   MarkingState::Internal(this).bitmap()->SetRange(AddressToMarkbitIndex(start),
     859        8368 :                                                   AddressToMarkbitIndex(end));
     860             :   MarkingState::Internal(this).IncrementLiveBytes(
     861        4184 :       static_cast<int>(end - start));
     862        4184 : }
     863             : 
     864          37 : void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
     865             :                                         Address start_free) {
     866             :   // We do not allow partial shrink for code.
     867             :   DCHECK(chunk->executable() == NOT_EXECUTABLE);
     868             : 
     869             :   intptr_t size;
     870          37 :   base::VirtualMemory* reservation = chunk->reserved_memory();
     871             :   DCHECK(reservation->IsReserved());
     872          37 :   size = static_cast<intptr_t>(reservation->size());
     873             : 
     874          37 :   size_t to_free_size = size - (start_free - chunk->address());
     875             : 
     876             :   DCHECK(size_.Value() >= to_free_size);
     877             :   size_.Decrement(to_free_size);
     878             :   isolate_->counters()->memory_allocated()->Decrement(
     879          37 :       static_cast<int>(to_free_size));
     880          37 :   chunk->set_size(size - to_free_size);
     881             : 
     882          37 :   reservation->ReleasePartial(start_free);
     883          37 : }
     884             : 
     885     1272315 : void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
     886             :   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
     887     1756741 :   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
     888             : 
     889             :   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
     890      878370 :                                          chunk->IsEvacuationCandidate());
     891             : 
     892     1362797 :   base::VirtualMemory* reservation = chunk->reserved_memory();
     893             :   const size_t size =
     894      878371 :       reservation->IsReserved() ? reservation->size() : chunk->size();
     895             :   DCHECK_GE(size_.Value(), static_cast<size_t>(size));
     896             :   size_.Decrement(size);
     897     1756742 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
     898      878371 :   if (chunk->executable() == EXECUTABLE) {
     899             :     DCHECK_GE(size_executable_.Value(), size);
     900             :     size_executable_.Decrement(size);
     901             :   }
     902             : 
     903             :   chunk->SetFlag(MemoryChunk::PRE_FREED);
     904      878371 : }
     905             : 
     906             : 
     907     1271511 : void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
     908             :   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
     909      877566 :   chunk->ReleaseAllocatedMemory();
     910             : 
     911      877578 :   base::VirtualMemory* reservation = chunk->reserved_memory();
     912      877578 :   if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
     913      183104 :     UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
     914             :   } else {
     915      694474 :     if (reservation->IsReserved()) {
     916             :       FreeMemory(reservation, chunk->executable());
     917             :     } else {
     918      393945 :       FreeMemory(chunk->address(), chunk->size(), chunk->executable());
     919             :     }
     920             :   }
     921      877589 : }
     922             : 
     923             : template <MemoryAllocator::FreeMode mode>
     924     1045846 : void MemoryAllocator::Free(MemoryChunk* chunk) {
     925             :   switch (mode) {
     926             :     case kFull:
     927      674199 :       PreFreeMemory(chunk);
     928      674200 :       PerformFreeMemory(chunk);
     929             :       break;
     930             :     case kAlreadyPooled:
     931             :       // Pooled pages cannot be touched anymore as their memory is uncommitted.
     932      167476 :       FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
     933             :                  Executability::NOT_EXECUTABLE);
     934             :       break;
     935             :     case kPooledAndQueue:
     936             :       DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
     937             :       DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
     938             :       chunk->SetFlag(MemoryChunk::POOLED);
     939             :     // Fall through to kPreFreeAndQueue.
     940             :     case kPreFreeAndQueue:
     941      204171 :       PreFreeMemory(chunk);
     942             :       // The chunks added to this queue will be freed by a concurrent thread.
     943      204171 :       unmapper()->AddMemoryChunkSafe(chunk);
     944             :       break;
     945             :   }
     946     1045847 : }
     947             : 
     948             : template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
     949             : 
     950             : template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
     951             :     MemoryChunk* chunk);
     952             : 
     953             : template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
     954             :     MemoryChunk* chunk);
     955             : 
     956             : template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
     957             :     MemoryChunk* chunk);
     958             : 
     959             : template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
     960      694307 : Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
     961             :                                     Executability executable) {
     962             :   MemoryChunk* chunk = nullptr;
     963             :   if (alloc_mode == kPooled) {
     964             :     DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
     965             :     DCHECK_EQ(executable, NOT_EXECUTABLE);
     966      188999 :     chunk = AllocatePagePooled(owner);
     967             :   }
     968      188999 :   if (chunk == nullptr) {
     969      678327 :     chunk = AllocateChunk(size, size, executable, owner);
     970             :   }
     971      694307 :   if (chunk == nullptr) return nullptr;
     972      694307 :   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
     973             : }
     974             : 
     975             : template Page*
     976             : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
     977             :     size_t size, PagedSpace* owner, Executability executable);
     978             : template Page*
     979             : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
     980             :     size_t size, SemiSpace* owner, Executability executable);
     981             : template Page*
     982             : MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
     983             :     size_t size, SemiSpace* owner, Executability executable);
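                      :
                      : // Sketch of a pooled allocation (illustrative, not part of the V8
                      : // sources; assumes `allocator` and `semi_space` are valid): kPooled first
                      : // tries to reuse an uncommitted page from the unmapper's pool and only
                      : // maps a fresh chunk when the pool is empty, mirroring AllocatePage above.
                      : static Page* PooledPageExample(MemoryAllocator* allocator,
                      :                                SemiSpace* semi_space) {
                      :   return allocator->AllocatePage<MemoryAllocator::kPooled>(
                      :       Page::kAllocatableMemory, semi_space, NOT_EXECUTABLE);
                      : }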
     984             : 
     985       20902 : LargePage* MemoryAllocator::AllocateLargePage(size_t size,
     986             :                                               LargeObjectSpace* owner,
     987             :                                               Executability executable) {
     988       20902 :   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
     989       20902 :   if (chunk == nullptr) return nullptr;
     990       20902 :   return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
     991             : }
     992             : 
     993             : template <typename SpaceType>
     994      188999 : MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
     995      188999 :   MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
     996      188999 :   if (chunk == nullptr) return nullptr;
     997             :   const int size = MemoryChunk::kPageSize;
     998             :   const Address start = reinterpret_cast<Address>(chunk);
     999       15980 :   const Address area_start = start + MemoryChunk::kObjectStartOffset;
    1000       15980 :   const Address area_end = start + size;
    1001       15980 :   if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
    1002             :     return nullptr;
    1003             :   }
    1004             :   base::VirtualMemory reservation(start, size);
    1005       15980 :   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
    1006       15980 :                           NOT_EXECUTABLE, owner, &reservation);
    1007             :   size_.Increment(size);
    1008       15980 :   return chunk;
    1009             : }
    1010             : 
    1011       15980 : bool MemoryAllocator::CommitBlock(Address start, size_t size,
    1012             :                                   Executability executable) {
    1013       15980 :   if (!CommitMemory(start, size, executable)) return false;
    1014             : 
    1015             :   if (Heap::ShouldZapGarbage()) {
    1016             :     ZapBlock(start, size);
    1017             :   }
    1018             : 
    1019       15980 :   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
    1020       15980 :   return true;
    1021             : }
    1022             : 
    1023             : 
    1024      365191 : bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
    1025      365191 :   if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
    1026      365216 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
    1027      365216 :   return true;
    1028             : }
    1029             : 
    1030             : 
    1031           0 : void MemoryAllocator::ZapBlock(Address start, size_t size) {
    1032           0 :   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    1033           0 :     Memory::Address_at(start + s) = kZapValue;
    1034             :   }
    1035           0 : }
    1036             : 
    1037             : #ifdef DEBUG
    1038             : void MemoryAllocator::ReportStatistics() {
    1039             :   size_t size = Size();
    1040             :   float pct = static_cast<float>(capacity_ - size) / capacity_;
     1041             :   PrintF("  capacity: %" PRIuS ", used: %" PRIuS ", available: %d%%\n\n",
     1042             :          capacity_, size, static_cast<int>(pct * 100));
    1043             : }
    1044             : #endif
    1045             : 
    1046     3772450 : size_t MemoryAllocator::CodePageGuardStartOffset() {
    1047             :   // We are guarding code pages: the first OS page after the header
    1048             :   // will be protected as non-writable.
    1049     3772450 :   return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
    1050             : }
    1051             : 
    1052        2289 : size_t MemoryAllocator::CodePageGuardSize() {
     1053     3047489 :   return static_cast<size_t>(GetCommitPageSize());
    1054             : }
    1055             : 
    1056     1742597 : size_t MemoryAllocator::CodePageAreaStartOffset() {
     1057             :   // We are guarding code pages: the executable area starts behind the
     1058             :   // header and the guard page that follows it.
    1059     3485194 :   return CodePageGuardStartOffset() + CodePageGuardSize();
    1060             : }
    1061             : 
    1062         847 : size_t MemoryAllocator::CodePageAreaEndOffset() {
    1063             :   // We are guarding code pages: the last OS page will be protected as
    1064             :   // non-writable.
     1065      120889 :   return Page::kPageSize - static_cast<size_t>(GetCommitPageSize());
    1066             : }
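                      :
                      : // Worked example of the offsets above (a sketch assuming a 4 KB commit
                      : // page size): the guard begins at the first page boundary after the
                      : // chunk header, the executable area begins one commit page later, and it
                      : // ends one commit page before the end of the page, i.e.
                      : //   [ header | guard | executable area | guard ].
                      : static size_t CodePageAreaSizeExample() {
                      :   // Usable executable bytes between the two guard regions.
                      :   return MemoryAllocator::CodePageAreaEndOffset() -
                      :          MemoryAllocator::CodePageAreaStartOffset();
                      : }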
    1067             : 
    1068      449867 : intptr_t MemoryAllocator::GetCommitPageSize() {
    1069     9765354 :   if (FLAG_v8_os_page_size != 0) {
    1070             :     DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
    1071           0 :     return FLAG_v8_os_page_size * KB;
    1072             :   } else {
    1073     9765354 :     return base::OS::CommitPageSize();
    1074             :   }
    1075             : }
    1076             : 
    1077             : 
    1078      405868 : bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
    1079             :                                              Address start, size_t commit_size,
    1080             :                                              size_t reserved_size) {
    1081             :   // Commit page header (not executable).
    1082             :   Address header = start;
    1083      405868 :   size_t header_size = CodePageGuardStartOffset();
    1084      405868 :   if (vm->Commit(header, header_size, false)) {
    1085             :     // Create guard page after the header.
    1086      405868 :     if (vm->Guard(start + CodePageGuardStartOffset())) {
    1087             :       // Commit page body (executable).
    1088      405868 :       Address body = start + CodePageAreaStartOffset();
    1089      405868 :       size_t body_size = commit_size - CodePageGuardStartOffset();
    1090      405868 :       if (vm->Commit(body, body_size, true)) {
    1091             :         // Create guard page before the end.
    1092      405868 :         if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
    1093      405868 :           UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
    1094      811736 :                                                 commit_size -
    1095      811736 :                                                 CodePageGuardStartOffset());
    1096      405868 :           return true;
    1097             :         }
    1098           0 :         vm->Uncommit(body, body_size);
    1099             :       }
    1100             :     }
    1101           0 :     vm->Uncommit(header, header_size);
    1102             :   }
    1103             :   return false;
    1104             : }
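                      :
                      : // Sketch of the protection layout CommitExecutableMemory establishes
                      : // (assuming the offset helpers above):
                      : //
                      : //   start                                       start + reserved_size
                      : //   | header: RW | guard: none | body: RWX ..... | guard: none |
                      : //
                      : // Both guard pages are inaccessible, so a stray access just past either
                      : // end of the executable body faults immediately.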
    1105             : 
    1106             : 
    1107             : // -----------------------------------------------------------------------------
    1108             : // MemoryChunk implementation
    1109             : 
    1110      878298 : void MemoryChunk::ReleaseAllocatedMemory() {
    1111      878298 :   if (skip_list_ != nullptr) {
    1112      214826 :     delete skip_list_;
    1113      214826 :     skip_list_ = nullptr;
    1114             :   }
    1115      878298 :   if (mutex_ != nullptr) {
    1116      878282 :     delete mutex_;
    1117      878311 :     mutex_ = nullptr;
    1118             :   }
    1119      878327 :   ReleaseSlotSet<OLD_TO_NEW>();
    1120      878256 :   ReleaseSlotSet<OLD_TO_OLD>();
    1121      878234 :   ReleaseTypedSlotSet<OLD_TO_NEW>();
    1122      878226 :   ReleaseTypedSlotSet<OLD_TO_OLD>();
    1123      878190 :   if (local_tracker_ != nullptr) ReleaseLocalTracker();
    1124      878198 :   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
    1125      878198 : }
    1126             : 
    1127       90079 : static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
    1128       90079 :   size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
    1129             :   DCHECK(pages > 0);
    1130       90079 :   SlotSet* slot_set = new SlotSet[pages];
    1131      182238 :   for (size_t i = 0; i < pages; i++) {
    1132       92159 :     slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
    1133             :   }
    1134       90079 :   return slot_set;
    1135             : }
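                      :
                      : // Worked example of the page count above (a sketch): for
                      : // size == 3 * Page::kPageSize + 1, the expression
                      : // (size + Page::kPageSize - 1) / Page::kPageSize evaluates to 4, i.e. a
                      : // ceiling division, so a partial trailing page still gets its own SlotSet.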
    1136             : 
    1137             : template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
    1138             : template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
    1139             : 
    1140             : template <RememberedSetType type>
    1141       90079 : SlotSet* MemoryChunk::AllocateSlotSet() {
    1142       90079 :   SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
    1143       90079 :   if (!slot_set_[type].TrySetValue(nullptr, slot_set)) {
    1144           0 :     delete[] slot_set;
    1145             :     slot_set = slot_set_[type].Value();
    1146             :     DCHECK(slot_set);
    1147           0 :     return slot_set;
    1148             :   }
    1149             :   return slot_set;
    1150             : }
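                      :
                      : // The TrySetValue call above is a lock-free publish: allocate privately,
                      : // try to install the pointer with a compare-and-swap, and on a lost race
                      : // delete the private copy and adopt the winner's. The same pattern with
                      : // std::atomic (an illustrative sketch, not part of the V8 sources;
                      : // requires <atomic>):
                      : //
                      : //   SlotSet* Publish(std::atomic<SlotSet*>* cell, SlotSet* fresh) {
                      : //     SlotSet* expected = nullptr;
                      : //     if (!cell->compare_exchange_strong(expected, fresh)) {
                      : //       delete[] fresh;   // Lost the race; drop our copy.
                      : //       return expected;  // Holds the winner after the failed CAS.
                      : //     }
                      : //     return fresh;
                      : //   }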
    1151             : 
    1152             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
    1153             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
    1154             : 
    1155             : template <RememberedSetType type>
    1156     1765112 : void MemoryChunk::ReleaseSlotSet() {
    1157             :   SlotSet* slot_set = slot_set_[type].Value();
    1158     1765096 :   if (slot_set) {
    1159       87395 :     delete[] slot_set;
    1160             :     slot_set_[type].SetValue(nullptr);
    1161             :   }
    1162     1765098 : }
    1163             : 
    1164             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
    1165             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
    1166             : 
    1167             : template <RememberedSetType type>
    1168       16151 : TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
    1169       16151 :   TypedSlotSet* slot_set = new TypedSlotSet(address());
    1170       16151 :   if (!typed_slot_set_[type].TrySetValue(nullptr, slot_set)) {
    1171           0 :     delete slot_set;
    1172             :     slot_set = typed_slot_set_[type].Value();
    1173             :     DCHECK(slot_set);
    1174           0 :     return slot_set;
    1175             :   }
    1176             :   return slot_set;
    1177             : }
    1178             : 
    1179             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
    1180             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
    1181             : 
    1182             : template <RememberedSetType type>
    1183     1762004 : void MemoryChunk::ReleaseTypedSlotSet() {
    1184             :   TypedSlotSet* typed_slot_set = typed_slot_set_[type].Value();
    1185     1761966 :   if (typed_slot_set) {
    1186       15675 :     delete typed_slot_set;
    1187             :     typed_slot_set_[type].SetValue(nullptr);
    1188             :   }
    1189     1761966 : }
    1190             : 
    1191      190132 : void MemoryChunk::AllocateLocalTracker() {
    1192             :   DCHECK_NULL(local_tracker_);
    1193      380264 :   local_tracker_ = new LocalArrayBufferTracker(heap());
    1194      190132 : }
    1195             : 
    1196      185718 : void MemoryChunk::ReleaseLocalTracker() {
    1197             :   DCHECK_NOT_NULL(local_tracker_);
    1198      185718 :   delete local_tracker_;
    1199      185724 :   local_tracker_ = nullptr;
    1200      185724 : }
    1201             : 
    1202           0 : void MemoryChunk::AllocateYoungGenerationBitmap() {
    1203             :   DCHECK_NULL(young_generation_bitmap_);
    1204           0 :   young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1205           0 : }
    1206             : 
    1207           0 : void MemoryChunk::ReleaseYoungGenerationBitmap() {
    1208             :   DCHECK_NOT_NULL(young_generation_bitmap_);
    1209           0 :   free(young_generation_bitmap_);
    1210           0 :   young_generation_bitmap_ = nullptr;
    1211           0 : }
    1212             : 
    1213             : // -----------------------------------------------------------------------------
    1214             : // PagedSpace implementation
    1215             : 
    1216             : STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
    1217             :               ObjectSpace::kObjectSpaceNewSpace);
    1218             : STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
    1219             :               ObjectSpace::kObjectSpaceOldSpace);
    1220             : STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
    1221             :               ObjectSpace::kObjectSpaceCodeSpace);
    1222             : STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
    1223             :               ObjectSpace::kObjectSpaceMapSpace);
    1224             : 
    1225        5088 : void Space::AddAllocationObserver(AllocationObserver* observer) {
    1226       67163 :   allocation_observers_->Add(observer);
    1227        5088 : }
    1228             : 
    1229        4980 : void Space::RemoveAllocationObserver(AllocationObserver* observer) {
    1230       65531 :   bool removed = allocation_observers_->RemoveElement(observer);
    1231             :   USE(removed);
    1232             :   DCHECK(removed);
    1233        4980 : }
    1234             : 
    1235      612780 : void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
    1236             : 
    1237      490224 : void Space::ResumeAllocationObservers() {
    1238      612780 :   allocation_observers_paused_ = false;
    1239      490224 : }
    1240             : 
    1241   263512398 : void Space::AllocationStep(Address soon_object, int size) {
    1242   263512398 :   if (!allocation_observers_paused_) {
    1243   234589658 :     for (int i = 0; i < allocation_observers_->length(); ++i) {
    1244    16904430 :       AllocationObserver* o = (*allocation_observers_)[i];
    1245     8452215 :       o->AllocationStep(size, soon_object, size);
    1246             :     }
    1247             :   }
    1248   263512398 : }
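                      :
                      : // Usage sketch (illustrative, not part of the V8 sources; assumes
                      : // `space` is a valid Space*): observers are paused around bulk work so
                      : // AllocationStep does not fire for every single object.
                      : static void BulkAllocateExample(Space* space) {
                      :   space->PauseAllocationObservers();
                      :   // ... allocations that should not trigger observer steps ...
                      :   space->ResumeAllocationObservers();
                      : }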
    1249             : 
    1250      300881 : PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
    1251             :                        Executability executable)
    1252     1203524 :     : Space(heap, space, executable), anchor_(this), free_list_(this) {
    1253      300881 :   area_size_ = MemoryAllocator::PageAreaSize(space);
    1254             :   accounting_stats_.Clear();
    1255             : 
    1256             :   allocation_info_.Reset(nullptr, nullptr);
    1257      300881 : }
    1258             : 
    1259             : 
    1260      182354 : bool PagedSpace::SetUp() { return true; }
    1261             : 
    1262             : 
    1263           0 : bool PagedSpace::HasBeenSetUp() { return true; }
    1264             : 
    1265             : 
    1266      296390 : void PagedSpace::TearDown() {
    1267     1072429 :   for (auto it = begin(); it != end();) {
    1268             :     Page* page = *(it++);  // Will be erased.
    1269      479649 :     ArrayBufferTracker::FreeAll(page);
    1270      479649 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
    1271             :   }
    1272             :   anchor_.set_next_page(&anchor_);
    1273             :   anchor_.set_prev_page(&anchor_);
    1274             :   accounting_stats_.Clear();
    1275      296390 : }
    1276             : 
    1277      253444 : void PagedSpace::RefillFreeList() {
    1278             :   // Any PagedSpace might invoke RefillFreeList. We filter all but our old
     1279             :   // Any PagedSpace might invoke RefillFreeList; only the old-generation
     1280             :   // spaces (old, code, and map space) proceed, all others return early.
    1281             :       identity() != MAP_SPACE) {
    1282      253457 :     return;
    1283             :   }
    1284      253449 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    1285             :   intptr_t added = 0;
    1286             :   {
    1287             :     Page* p = nullptr;
    1288      982564 :     while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
     1289             :       // Pages can actually change ownership only during compaction. This is
     1290             :       // safe because no other action competes for the page links while
     1291             :       // compaction is running.
    1292      479652 :       if (is_local() && (p->owner() != this)) {
    1293             :         base::LockGuard<base::Mutex> guard(
    1294       30798 :             reinterpret_cast<PagedSpace*>(p->owner())->mutex());
    1295       30798 :         p->Unlink();
    1296             :         p->set_owner(this);
    1297       30798 :         p->InsertAfter(anchor_.prev_page());
    1298             :       }
    1299      479652 :       added += RelinkFreeListCategories(p);
    1300      479652 :       added += p->wasted_memory();
    1301      479652 :       if (is_local() && (added > kCompactionMemoryWanted)) break;
    1302             :     }
    1303             :   }
    1304      253462 :   accounting_stats_.IncreaseCapacity(added);
    1305             : }
    1306             : 
    1307      118521 : void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
    1308             :   DCHECK(identity() == other->identity());
    1309             :   // Unmerged fields:
    1310             :   //   area_size_
    1311             :   //   anchor_
    1312             : 
    1313      118521 :   other->EmptyAllocationInfo();
    1314             : 
    1315             :   // Update and clear accounting statistics.
    1316             :   accounting_stats_.Merge(other->accounting_stats_);
    1317             :   other->accounting_stats_.Clear();
    1318             : 
    1319             :   // The linear allocation area of {other} should be destroyed now.
    1320             :   DCHECK(other->top() == nullptr);
    1321             :   DCHECK(other->limit() == nullptr);
    1322             : 
    1323      118521 :   AccountCommitted(other->CommittedMemory());
    1324             : 
    1325             :   // Move over pages.
    1326      272260 :   for (auto it = other->begin(); it != other->end();) {
    1327             :     Page* p = *(it++);
    1328             : 
    1329             :     // Relinking requires the category to be unlinked.
    1330             :     other->UnlinkFreeListCategories(p);
    1331             : 
    1332       35218 :     p->Unlink();
    1333             :     p->set_owner(this);
    1334       35218 :     p->InsertAfter(anchor_.prev_page());
    1335             :     RelinkFreeListCategories(p);
    1336             :     DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
    1337             :   }
    1338      118521 : }
    1339             : 
    1340             : 
    1341          21 : size_t PagedSpace::CommittedPhysicalMemory() {
    1342          21 :   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
    1343          21 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1344             :   size_t size = 0;
    1345         196 :   for (Page* page : *this) {
    1346          77 :     size += page->CommittedPhysicalMemory();
    1347             :   }
    1348          21 :   return size;
    1349             : }
    1350             : 
    1351          12 : bool PagedSpace::ContainsSlow(Address addr) {
    1352             :   Page* p = Page::FromAddress(addr);
    1353         252 :   for (Page* page : *this) {
    1354         120 :     if (page == p) return true;
    1355             :   }
    1356           6 :   return false;
    1357             : }
    1358             : 
    1359      182091 : void PagedSpace::ShrinkImmortalImmovablePages() {
    1360             :   DCHECK(!heap()->deserialization_complete());
    1361      182091 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1362      182091 :   EmptyAllocationInfo();
    1363             :   ResetFreeList();
    1364             : 
    1365      971212 :   for (Page* page : *this) {
    1366             :     DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    1367      303515 :     size_t unused = page->ShrinkToHighWaterMark();
    1368             :     accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    1369             :     AccountUncommitted(unused);
    1370             :   }
    1371      182091 : }
    1372             : 
    1373      509125 : bool PagedSpace::Expand() {
    1374             :   const int size = AreaSize();
    1375             : 
    1376     1523542 :   if (!heap()->CanExpandOldGeneration(size)) return false;
    1377             : 
    1378      505292 :   Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
    1379      505292 :   if (p == nullptr) return false;
    1380             : 
    1381      505292 :   AccountCommitted(p->size());
    1382             : 
    1383             :   // Pages created during bootstrapping may contain immortal immovable objects.
    1384      505292 :   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
    1385             : 
    1386             :   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
    1387             : 
    1388      505292 :   p->InsertAfter(anchor_.prev_page());
    1389             : 
    1390      505292 :   return true;
    1391             : }
    1392             : 
    1393             : 
    1394      106914 : int PagedSpace::CountTotalPages() {
    1395             :   int count = 0;
    1396      983130 :   for (Page* page : *this) {
    1397      384651 :     count++;
    1398             :     USE(page);
    1399             :   }
    1400      106914 :   return count;
    1401             : }
    1402             : 
    1403             : 
    1404      160395 : void PagedSpace::ResetFreeListStatistics() {
    1405     1310832 :   for (Page* page : *this) {
    1406      495021 :     page->ResetFreeListStatistics();
    1407             :   }
    1408      160395 : }
    1409             : 
    1410     2227930 : void PagedSpace::SetAllocationInfo(Address top, Address limit) {
    1411             :   SetTopAndLimit(top, limit);
    1412     4360433 :   if (top != nullptr && top != limit &&
    1413     2132503 :       heap()->incremental_marking()->black_allocation()) {
    1414        1060 :     Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
    1415             :   }
    1416     2227930 : }
    1417             : 
    1418        2931 : void PagedSpace::MarkAllocationInfoBlack() {
    1419             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1420             :   Address current_top = top();
    1421             :   Address current_limit = limit();
    1422        2931 :   if (current_top != nullptr && current_top != current_limit) {
    1423             :     Page::FromAllocationAreaAddress(current_top)
    1424        2054 :         ->CreateBlackArea(current_top, current_limit);
    1425             :   }
    1426        2931 : }
    1427             : 
     1428             : // Empty the space's linear allocation area, returning the unused part to the free list.
    1429     3287187 : void PagedSpace::EmptyAllocationInfo() {
    1430             :   // Mark the old linear allocation area with a free space map so it can be
    1431             :   // skipped when scanning the heap.
    1432             :   Address current_top = top();
    1433             :   Address current_limit = limit();
    1434     3287187 :   if (current_top == nullptr) {
    1435             :     DCHECK(current_limit == nullptr);
    1436     3287189 :     return;
    1437             :   }
    1438             : 
    1439     2088524 :   if (heap()->incremental_marking()->black_allocation()) {
    1440             :     Page* page = Page::FromAllocationAreaAddress(current_top);
    1441             : 
    1442             :     // Clear the bits in the unused black area.
    1443        3083 :     if (current_top != current_limit) {
    1444             :       MarkingState::Internal(page).bitmap()->ClearRange(
    1445             :           page->AddressToMarkbitIndex(current_top),
    1446        5324 :           page->AddressToMarkbitIndex(current_limit));
    1447             :       MarkingState::Internal(page).IncrementLiveBytes(
    1448        2662 :           -static_cast<int>(current_limit - current_top));
    1449             :     }
    1450             :   }
    1451             : 
     1452             :   SetTopAndLimit(nullptr, nullptr);
    1453             :   DCHECK_GE(current_limit, current_top);
    1454     2088526 :   Free(current_top, current_limit - current_top);
    1455             : }
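                      :
                      : // Sketch of the invariant above: [top, limit) is the still-unused tail
                      : // of the linear allocation area. Emptying it returns that whole span to
                      : // the free list as a free-space object, so heap iterators can skip it.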
    1456             : 
    1457         668 : void PagedSpace::IncreaseCapacity(size_t bytes) {
    1458             :   accounting_stats_.ExpandSpace(bytes);
    1459         668 : }
    1460             : 
    1461       13505 : void PagedSpace::ReleasePage(Page* page) {
    1462             :   DCHECK_EQ(0, MarkingState::Internal(page).live_bytes());
    1463             :   DCHECK_EQ(page->owner(), this);
    1464             : 
    1465       13505 :   free_list_.EvictFreeListItems(page);
    1466             :   DCHECK(!free_list_.ContainsPageFreeListItems(page));
    1467             : 
    1468       27010 :   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    1469             :     allocation_info_.Reset(nullptr, nullptr);
    1470             :   }
    1471             : 
    1472             :   // If page is still in a list, unlink it from that list.
    1473       27010 :   if (page->next_chunk() != NULL) {
    1474             :     DCHECK(page->prev_chunk() != NULL);
    1475        3892 :     page->Unlink();
    1476             :   }
    1477             : 
    1478       13505 :   AccountUncommitted(page->size());
    1479             :   accounting_stats_.ShrinkSpace(page->area_size());
    1480       13505 :   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    1481       13505 : }
    1482             : 
    1483       73707 : std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
    1484      147414 :   return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
    1485             : }
    1486             : 
    1487             : #ifdef DEBUG
    1488             : void PagedSpace::Print() {}
    1489             : #endif
    1490             : 
    1491             : #ifdef VERIFY_HEAP
    1492             : void PagedSpace::Verify(ObjectVisitor* visitor) {
    1493             :   bool allocation_pointer_found_in_space =
    1494             :       (allocation_info_.top() == allocation_info_.limit());
    1495             :   for (Page* page : *this) {
    1496             :     CHECK(page->owner() == this);
    1497             :     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
    1498             :       allocation_pointer_found_in_space = true;
    1499             :     }
    1500             :     CHECK(page->SweepingDone());
    1501             :     HeapObjectIterator it(page);
    1502             :     Address end_of_previous_object = page->area_start();
    1503             :     Address top = page->area_end();
    1504             :     int black_size = 0;
    1505             :     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    1506             :       CHECK(end_of_previous_object <= object->address());
    1507             : 
    1508             :       // The first word should be a map, and we expect all map pointers to
    1509             :       // be in map space.
    1510             :       Map* map = object->map();
    1511             :       CHECK(map->IsMap());
    1512             :       CHECK(heap()->map_space()->Contains(map));
    1513             : 
    1514             :       // Perform space-specific object verification.
    1515             :       VerifyObject(object);
    1516             : 
    1517             :       // The object itself should look OK.
    1518             :       object->ObjectVerify();
    1519             : 
    1520             :       // All the interior pointers should be contained in the heap.
    1521             :       int size = object->Size();
    1522             :       object->IterateBody(map->instance_type(), size, visitor);
    1523             :       if (ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
    1524             :         black_size += size;
    1525             :       }
    1526             : 
    1527             :       CHECK(object->address() + size <= top);
    1528             :       end_of_previous_object = object->address() + size;
    1529             :     }
    1530             :     CHECK_LE(black_size, MarkingState::Internal(page).live_bytes());
    1531             :   }
    1532             :   CHECK(allocation_pointer_found_in_space);
    1533             : }
    1534             : #endif  // VERIFY_HEAP
    1535             : 
    1536             : // -----------------------------------------------------------------------------
    1537             : // NewSpace implementation
    1538             : 
    1539       60789 : bool NewSpace::SetUp(size_t initial_semispace_capacity,
    1540             :                      size_t maximum_semispace_capacity) {
    1541             :   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
    1542             :   DCHECK(base::bits::IsPowerOfTwo32(
    1543             :       static_cast<uint32_t>(maximum_semispace_capacity)));
    1544             : 
    1545             :   to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
    1546             :   from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
    1547       60789 :   if (!to_space_.Commit()) {
    1548             :     return false;
    1549             :   }
    1550             :   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
    1551       60789 :   ResetAllocationInfo();
    1552             : 
    1553             :   // Allocate and set up the histogram arrays if necessary.
    1554       60789 :   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
    1555       60789 :   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
    1556             : #define SET_NAME(name)                        \
    1557             :   allocated_histogram_[name].set_name(#name); \
    1558             :   promoted_histogram_[name].set_name(#name);
    1559    17142498 :   INSTANCE_TYPE_LIST(SET_NAME)
    1560             : #undef SET_NAME
    1561             : 
    1562       60789 :   return true;
    1563             : }
    1564             : 
    1565             : 
    1566       59292 : void NewSpace::TearDown() {
    1567       59292 :   if (allocated_histogram_) {
    1568             :     DeleteArray(allocated_histogram_);
    1569       59292 :     allocated_histogram_ = NULL;
    1570             :   }
    1571       59292 :   if (promoted_histogram_) {
    1572             :     DeleteArray(promoted_histogram_);
    1573       59292 :     promoted_histogram_ = NULL;
    1574             :   }
    1575             : 
    1576             :   allocation_info_.Reset(nullptr, nullptr);
    1577             : 
    1578       59292 :   to_space_.TearDown();
    1579       59292 :   from_space_.TearDown();
    1580       59292 : }
    1581             : 
    1582      122535 : void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
    1583             : 
    1584             : 
    1585        2126 : void NewSpace::Grow() {
    1586             :   // Double the semispace size but only up to maximum capacity.
    1587             :   DCHECK(TotalCapacity() < MaximumCapacity());
    1588             :   size_t new_capacity =
    1589             :       Min(MaximumCapacity(),
    1590        4252 :           static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
    1591        2126 :   if (to_space_.GrowTo(new_capacity)) {
    1592             :     // Only grow from space if we managed to grow to-space.
    1593        2126 :     if (!from_space_.GrowTo(new_capacity)) {
    1594             :       // If we managed to grow to-space but couldn't grow from-space,
    1595             :       // attempt to shrink to-space.
    1596           0 :       if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
    1597             :         // We are in an inconsistent state because we could not
    1598             :         // commit/uncommit memory from new space.
    1599           0 :         CHECK(false);
    1600             :       }
    1601             :     }
    1602             :   }
    1603             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    1604        2126 : }
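                      :
                      : // Worked example of the growth step above (a sketch): with
                      : // FLAG_semi_space_growth_factor == 2, TotalCapacity() == 1 MB and
                      : // MaximumCapacity() == 8 MB, new_capacity = Min(8 MB, 2 MB) = 2 MB, so
                      : // each successful Grow() doubles the semispaces until the maximum is hit.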
    1605             : 
    1606             : 
    1607       24429 : void NewSpace::Shrink() {
    1608       24429 :   size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
    1609             :   size_t rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
    1610       24578 :   if (rounded_new_capacity < TotalCapacity() &&
    1611         149 :       to_space_.ShrinkTo(rounded_new_capacity)) {
    1612             :     // Only shrink from-space if we managed to shrink to-space.
    1613         149 :     from_space_.Reset();
    1614         149 :     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
    1615             :       // If we managed to shrink to-space but couldn't shrink from
    1616             :       // space, attempt to grow to-space again.
    1617           0 :       if (!to_space_.GrowTo(from_space_.current_capacity())) {
    1618             :         // We are in an inconsistent state because we could not
    1619             :         // commit/uncommit memory from new space.
    1620           0 :         CHECK(false);
    1621             :       }
    1622             :     }
    1623             :   }
    1624             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    1625       24429 : }
    1626             : 
    1627       53346 : bool NewSpace::Rebalance() {
    1628      106692 :   CHECK(heap()->promotion_queue()->is_empty());
    1629             :   // Order here is important to make use of the page pool.
    1630      106692 :   return to_space_.EnsureCurrentCapacity() &&
    1631      106692 :          from_space_.EnsureCurrentCapacity();
    1632             : }
    1633             : 
    1634      106692 : bool SemiSpace::EnsureCurrentCapacity() {
    1635      106692 :   if (is_committed()) {
    1636             :     const int expected_pages =
    1637      106692 :         static_cast<int>(current_capacity_ / Page::kPageSize);
    1638             :     int actual_pages = 0;
    1639             :     Page* current_page = anchor()->next_page();
    1640      495904 :     while (current_page != anchor()) {
    1641      282520 :       actual_pages++;
    1642             :       current_page = current_page->next_page();
    1643      282520 :       if (actual_pages > expected_pages) {
    1644             :         Page* to_remove = current_page->prev_page();
    1645             :         // Make sure we don't overtake the actual top pointer.
    1646         489 :         CHECK_NE(to_remove, current_page_);
    1647         489 :         to_remove->Unlink();
    1648             :         // Clear new space flags to avoid this page being treated as a new
    1649             :         // space page that is potentially being swept.
    1650             :         to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
    1651             :         heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
    1652        2803 :             to_remove);
    1653             :       }
    1654             :     }
    1655      107849 :     while (actual_pages < expected_pages) {
    1656        1157 :       actual_pages++;
    1657             :       current_page =
    1658             :           heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    1659        1157 :               Page::kAllocatableMemory, this, executable());
    1660        1157 :       if (current_page == nullptr) return false;
    1661             :       DCHECK_NOT_NULL(current_page);
    1662        1157 :       current_page->InsertAfter(anchor());
    1663             :       MarkingState::Internal(current_page).ClearLiveness();
    1664             :       current_page->SetFlags(anchor()->prev_page()->GetFlags(),
    1665             :                              static_cast<uintptr_t>(Page::kCopyAllFlags));
    1666             :       heap()->CreateFillerObjectAt(current_page->area_start(),
    1667             :                                    static_cast<int>(current_page->area_size()),
    1668        2314 :                                    ClearRecordedSlots::kNo);
    1669             :     }
    1670             :   }
    1671             :   return true;
    1672             : }
    1673             : 
    1674      949795 : AllocationInfo LocalAllocationBuffer::Close() {
    1675      949795 :   if (IsValid()) {
    1676             :     heap_->CreateFillerObjectAt(
    1677             :         allocation_info_.top(),
    1678      125195 :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    1679      125195 :         ClearRecordedSlots::kNo);
    1680      125199 :     const AllocationInfo old_info = allocation_info_;
    1681      125199 :     allocation_info_ = AllocationInfo(nullptr, nullptr);
    1682      125199 :     return old_info;
    1683             :   }
    1684             :   return AllocationInfo(nullptr, nullptr);
    1685             : }
    1686             : 
    1687             : 
    1688      267168 : LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
    1689             :                                              AllocationInfo allocation_info)
    1690      267168 :     : heap_(heap), allocation_info_(allocation_info) {
    1691      267168 :   if (IsValid()) {
    1692             :     heap_->CreateFillerObjectAt(
    1693             :         allocation_info_.top(),
    1694      207902 :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    1695      207902 :         ClearRecordedSlots::kNo);
    1696             :   }
    1697      267150 : }
    1698             : 
    1699             : 
    1700      207839 : LocalAllocationBuffer::LocalAllocationBuffer(
    1701             :     const LocalAllocationBuffer& other) {
    1702      207839 :   *this = other;
    1703      207839 : }
    1704             : 
    1705             : 
    1706      415597 : LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    1707             :     const LocalAllocationBuffer& other) {
    1708      415597 :   Close();
    1709      415603 :   heap_ = other.heap_;
    1710      415603 :   allocation_info_ = other.allocation_info_;
    1711             : 
     1712             :   // This is needed since we (a) cannot yet use move-semantics, (b) want to
     1713             :   // make it easy to use the class as a value, and (c) implicitly call
     1714             :   // {Close} upon copy.
    1715             :   const_cast<LocalAllocationBuffer&>(other)
    1716             :       .allocation_info_.Reset(nullptr, nullptr);
    1717      415603 :   return *this;
    1718             : }
    1719             : 
    1720             : 
    1721      303387 : void NewSpace::UpdateAllocationInfo() {
    1722      303387 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1723      303387 :   allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
    1724      303387 :   UpdateInlineAllocationLimit(0);
    1725             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    1726      303387 : }
    1727             : 
    1728             : 
    1729      183324 : void NewSpace::ResetAllocationInfo() {
    1730      366648 :   Address old_top = allocation_info_.top();
    1731      183324 :   to_space_.Reset();
    1732      183324 :   UpdateAllocationInfo();
    1733             :   // Clear all mark-bits in the to-space.
    1734     1260752 :   for (Page* p : to_space_) {
    1735             :     MarkingState::Internal(p).ClearLiveness();
    1736             :   }
    1737      183324 :   InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
    1738      183324 : }
    1739             : 
    1740             : 
    1741      818504 : void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
    1742      818504 :   if (heap()->inline_allocation_disabled()) {
    1743             :     // Lowest limit when linear allocation was disabled.
    1744      818504 :     Address high = to_space_.page_high();
    1745      444862 :     Address new_top = allocation_info_.top() + size_in_bytes;
    1746             :     allocation_info_.set_limit(Min(new_top, high));
    1747      779430 :   } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
    1748             :     // Normal limit is the end of the current page.
    1749             :     allocation_info_.set_limit(to_space_.page_high());
    1750             :   } else {
    1751             :     // Lower limit during incremental marking.
    1752             :     Address high = to_space_.page_high();
    1753      405788 :     Address new_top = allocation_info_.top() + size_in_bytes;
    1754      405788 :     Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
    1755             :     allocation_info_.set_limit(Min(new_limit, high));
    1756             :   }
    1757             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    1758      818504 : }
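                      :
                      : // Worked example of the three cases above (a sketch): with inline
                      : // allocation disabled, top == 0x1000 and size_in_bytes == 32, the limit
                      : // becomes Min(0x1020, page_high), so exactly one more allocation fits
                      : // before the slow path runs again; with observers active, the limit is
                      : // additionally clamped by the smallest observer step.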
    1759             : 
    1760             : 
    1761      187902 : bool NewSpace::AddFreshPage() {
    1762      187902 :   Address top = allocation_info_.top();
    1763             :   DCHECK(!Page::IsAtObjectStart(top));
    1764      187902 :   if (!to_space_.AdvancePage()) {
    1765             :     // No more pages left to advance.
    1766             :     return false;
    1767             :   }
    1768             : 
    1769             :   // Clear remainder of current page.
    1770      120063 :   Address limit = Page::FromAllocationAreaAddress(top)->area_end();
    1771      240126 :   if (heap()->gc_state() == Heap::SCAVENGE) {
    1772        6154 :     heap()->promotion_queue()->SetNewLimit(limit);
    1773             :   }
    1774             : 
    1775      120063 :   int remaining_in_page = static_cast<int>(limit - top);
    1776      120063 :   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
    1777      120063 :   UpdateAllocationInfo();
    1778             : 
    1779      120063 :   return true;
    1780             : }
    1781             : 
    1782             : 
    1783           0 : bool NewSpace::AddFreshPageSynchronized() {
    1784           0 :   base::LockGuard<base::Mutex> guard(&mutex_);
    1785           0 :   return AddFreshPage();
    1786             : }
    1787             : 
    1788             : 
    1789      307181 : bool NewSpace::EnsureAllocation(int size_in_bytes,
    1790             :                                 AllocationAlignment alignment) {
    1791      785854 :   Address old_top = allocation_info_.top();
    1792      426815 :   Address high = to_space_.page_high();
    1793      307181 :   int filler_size = Heap::GetFillToAlign(old_top, alignment);
    1794      307181 :   int aligned_size_in_bytes = size_in_bytes + filler_size;
    1795             : 
    1796      307181 :   if (old_top + aligned_size_in_bytes > high) {
    1797             :     // Not enough room in the page, try to allocate a new one.
    1798      187410 :     if (!AddFreshPage()) {
    1799             :       return false;
    1800             :     }
    1801             : 
    1802      119634 :     InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
    1803             : 
    1804             :     old_top = allocation_info_.top();
    1805             :     high = to_space_.page_high();
    1806      119634 :     filler_size = Heap::GetFillToAlign(old_top, alignment);
    1807             :   }
    1808             : 
    1809             :   DCHECK(old_top + aligned_size_in_bytes <= high);
    1810             : 
    1811      239405 :   if (allocation_info_.limit() < high) {
    1812             :     // Either the limit has been lowered because linear allocation was disabled
    1813             :     // or because incremental marking wants to get a chance to do a step,
    1814             :     // or because idle scavenge job wants to get a chance to post a task.
    1815             :     // Set the new limit accordingly.
    1816      146907 :     Address new_top = old_top + aligned_size_in_bytes;
    1817      146907 :     Address soon_object = old_top + filler_size;
    1818      146907 :     InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
    1819      146907 :     UpdateInlineAllocationLimit(aligned_size_in_bytes);
    1820             :   }
    1821             :   return true;
    1822             : }
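                      :
                      : // Worked example of the alignment fix-up above (a sketch): for a
                      : // kDoubleAligned request of 24 bytes with old_top ending in ...4 on a
                      : // 32-bit target, filler_size is 4, aligned_size_in_bytes is 28, and the
                      : // object itself starts at old_top + 4 (== soon_object).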
    1823             : 
    1824             : 
    1825      245182 : void NewSpace::StartNextInlineAllocationStep() {
    1826      245182 :   if (!allocation_observers_paused_) {
    1827             :     top_on_previous_step_ =
    1828      243980 :         allocation_observers_->length() ? allocation_info_.top() : 0;
    1829      243980 :     UpdateInlineAllocationLimit(0);
    1830             :   }
    1831      245182 : }
    1832             : 
    1833             : 
    1834      405788 : intptr_t NewSpace::GetNextInlineAllocationStepSize() {
    1835             :   intptr_t next_step = 0;
    1836     1776460 :   for (int i = 0; i < allocation_observers_->length(); ++i) {
    1837     1447326 :     AllocationObserver* o = (*allocation_observers_)[i];
    1838             :     next_step = next_step ? Min(next_step, o->bytes_to_next_step())
    1839      482442 :                           : o->bytes_to_next_step();
    1840             :   }
    1841             :   DCHECK(allocation_observers_->length() == 0 || next_step != 0);
    1842      405788 :   return next_step;
    1843             : }
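                      :
                      : // Worked example (a sketch): with two observers wanting 512 and 128
                      : // bytes until their next step, next_step = Min(512, 128) = 128, so the
                      : // allocation limit is lowered to fire the nearest observer first.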
    1844             : 
    1845       62075 : void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
    1846             :   Space::AddAllocationObserver(observer);
    1847       62075 :   StartNextInlineAllocationStep();
    1848       62075 : }
    1849             : 
    1850       60551 : void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
    1851             :   Space::RemoveAllocationObserver(observer);
    1852       60551 :   StartNextInlineAllocationStep();
    1853       60551 : }
    1854             : 
    1855      122556 : void NewSpace::PauseAllocationObservers() {
    1856             :   // Do a step to account for memory allocated so far.
    1857      122556 :   InlineAllocationStep(top(), top(), nullptr, 0);
    1858             :   Space::PauseAllocationObservers();
    1859      122556 :   top_on_previous_step_ = 0;
    1860      122556 :   UpdateInlineAllocationLimit(0);
    1861      122556 : }
    1862             : 
    1863      122556 : void NewSpace::ResumeAllocationObservers() {
    1864             :   DCHECK(top_on_previous_step_ == 0);
    1865             :   Space::ResumeAllocationObservers();
    1866      122556 :   StartNextInlineAllocationStep();
    1867      122556 : }
    1868             : 
    1869             : 
    1870      572421 : void NewSpace::InlineAllocationStep(Address top, Address new_top,
    1871             :                                     Address soon_object, size_t size) {
    1872      572421 :   if (top_on_previous_step_) {
    1873      382446 :     int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
    1874     1682854 :     for (int i = 0; i < allocation_observers_->length(); ++i) {
    1875      458981 :       (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
    1876      458981 :                                                   size);
    1877             :     }
    1878      382446 :     top_on_previous_step_ = new_top;
    1879             :   }
    1880      572421 : }
    1881             : 
    1882       24569 : std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
    1883       24569 :   return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
    1884             : }
    1885             : 
    1886             : #ifdef VERIFY_HEAP
     1887             : // We do not use the SemiSpaceIterator because verification must not assume
     1888             : // that it works (the iterator depends on the invariants we are checking).
    1889             : void NewSpace::Verify() {
    1890             :   // The allocation pointer should be in the space or at the very end.
    1891             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    1892             : 
    1893             :   // There should be objects packed in from the low address up to the
    1894             :   // allocation pointer.
    1895             :   Address current = to_space_.first_page()->area_start();
    1896             :   CHECK_EQ(current, to_space_.space_start());
    1897             : 
    1898             :   while (current != top()) {
    1899             :     if (!Page::IsAlignedToPageSize(current)) {
    1900             :       // The allocation pointer should not be in the middle of an object.
    1901             :       CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
    1902             :             current < top());
    1903             : 
    1904             :       HeapObject* object = HeapObject::FromAddress(current);
    1905             : 
    1906             :       // The first word should be a map, and we expect all map pointers to
    1907             :       // be in map space.
    1908             :       Map* map = object->map();
    1909             :       CHECK(map->IsMap());
    1910             :       CHECK(heap()->map_space()->Contains(map));
    1911             : 
    1912             :       // The object should not be code or a map.
    1913             :       CHECK(!object->IsMap());
    1914             :       CHECK(!object->IsAbstractCode());
    1915             : 
    1916             :       // The object itself should look OK.
    1917             :       object->ObjectVerify();
    1918             : 
    1919             :       // All the interior pointers should be contained in the heap.
    1920             :       VerifyPointersVisitor visitor;
    1921             :       int size = object->Size();
    1922             :       object->IterateBody(map->instance_type(), size, &visitor);
    1923             : 
    1924             :       current += size;
    1925             :     } else {
    1926             :       // At end of page, switch to next page.
    1927             :       Page* page = Page::FromAllocationAreaAddress(current)->next_page();
    1928             :       // Next page should be valid.
    1929             :       CHECK(!page->is_anchor());
    1930             :       current = page->area_start();
    1931             :     }
    1932             :   }
    1933             : 
    1934             :   // Check semi-spaces.
    1935             :   CHECK_EQ(from_space_.id(), kFromSpace);
    1936             :   CHECK_EQ(to_space_.id(), kToSpace);
    1937             :   from_space_.Verify();
    1938             :   to_space_.Verify();
    1939             : }
    1940             : #endif
    1941             : 
    1942             : // -----------------------------------------------------------------------------
    1943             : // SemiSpace implementation
    1944             : 
    1945           0 : void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
    1946             :   DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
    1947      121578 :   minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
    1948      121578 :   current_capacity_ = minimum_capacity_;
    1949      121578 :   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
    1950      121578 :   committed_ = false;
    1951           0 : }
    1952             : 
    1953             : 
    1954      118584 : void SemiSpace::TearDown() {
    1955             :   // Properly uncommit memory to keep the allocator counters in sync.
    1956      118584 :   if (is_committed()) {
    1957      425372 :     for (Page* p : *this) {
    1958      146938 :       ArrayBufferTracker::FreeAll(p);
    1959             :     }
    1960       65748 :     Uncommit();
    1961             :   }
    1962      118584 :   current_capacity_ = maximum_capacity_ = 0;
    1963      118584 : }
    1964             : 
    1965             : 
    1966       84975 : bool SemiSpace::Commit() {
    1967             :   DCHECK(!is_committed());
    1968       84975 :   Page* current = anchor();
    1969       84975 :   const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
    1970      256257 :   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    1971             :     Page* new_page =
    1972             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    1973      171282 :             Page::kAllocatableMemory, this, executable());
    1974      171282 :     if (new_page == nullptr) {
    1975           0 :       RewindPages(current, pages_added);
    1976           0 :       return false;
    1977             :     }
    1978      171282 :     new_page->InsertAfter(current);
    1979             :     current = new_page;
    1980             :   }
    1981       84975 :   Reset();
    1982       84975 :   AccountCommitted(current_capacity_);
    1983       84975 :   if (age_mark_ == nullptr) {
    1984       72839 :     age_mark_ = first_page()->area_start();
    1985             :   }
    1986       84975 :   committed_ = true;
    1987       84975 :   return true;
    1988             : }
    1989             : 
    1990             : 
    1991       82960 : bool SemiSpace::Uncommit() {
    1992             :   DCHECK(is_committed());
    1993      347768 :   for (auto it = begin(); it != end();) {
    1994             :     Page* p = *(it++);
    1995      264808 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
    1996             :   }
    1997             :   anchor()->set_next_page(anchor());
    1998             :   anchor()->set_prev_page(anchor());
    1999       82960 :   AccountUncommitted(current_capacity_);
    2000       82960 :   committed_ = false;
    2001       82960 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2002       82960 :   return true;
    2003             : }
    2004             : 
    2005             : 
    2006           8 : size_t SemiSpace::CommittedPhysicalMemory() {
    2007           8 :   if (!is_committed()) return 0;
    2008             :   size_t size = 0;
    2009          48 :   for (Page* p : *this) {
    2010          16 :     size += p->CommittedPhysicalMemory();
    2011             :   }
    2012           8 :   return size;
    2013             : }
    2014             : 
    2015        4252 : bool SemiSpace::GrowTo(size_t new_capacity) {
    2016        4252 :   if (!is_committed()) {
    2017         100 :     if (!Commit()) return false;
    2018             :   }
    2019             :   DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
    2020             :   DCHECK_LE(new_capacity, maximum_capacity_);
    2021             :   DCHECK_GT(new_capacity, current_capacity_);
    2022        4252 :   const size_t delta = new_capacity - current_capacity_;
    2023             :   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
    2024        4252 :   const int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2025             :   Page* last_page = anchor()->prev_page();
    2026             :   DCHECK_NE(last_page, anchor());
    2027       20812 :   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    2028             :     Page* new_page =
    2029             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2030       16560 :             Page::kAllocatableMemory, this, executable());
    2031       16560 :     if (new_page == nullptr) {
    2032           0 :       RewindPages(last_page, pages_added);
    2033           0 :       return false;
    2034             :     }
    2035       16560 :     new_page->InsertAfter(last_page);
    2036             :     MarkingState::Internal(new_page).ClearLiveness();
    2037             :     // Duplicate the flags that were set on the old page.
    2038             :     new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
    2039             :     last_page = new_page;
    2040             :   }
    2041             :   AccountCommitted(delta);
    2042        4252 :   current_capacity_ = new_capacity;
    2043        4252 :   return true;
    2044             : }
    2045             : 
    2046           0 : void SemiSpace::RewindPages(Page* start, int num_pages) {
    2047             :   Page* new_last_page = nullptr;
    2048             :   Page* last_page = start;
    2049           0 :   while (num_pages > 0) {
    2050             :     DCHECK_NE(last_page, anchor());
    2051             :     new_last_page = last_page->prev_page();
    2052             :     last_page->prev_page()->set_next_page(last_page->next_page());
    2053             :     last_page->next_page()->set_prev_page(last_page->prev_page());
    2054             :     last_page = new_last_page;
    2055           0 :     num_pages--;
    2056             :   }
    2057           0 : }
    2058             : 
    2059         298 : bool SemiSpace::ShrinkTo(size_t new_capacity) {
    2060             :   DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
    2061             :   DCHECK_GE(new_capacity, minimum_capacity_);
    2062             :   DCHECK_LT(new_capacity, current_capacity_);
    2063         298 :   if (is_committed()) {
    2064         298 :     const size_t delta = current_capacity_ - new_capacity;
    2065             :     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
    2066         298 :     int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2067             :     Page* new_last_page;
    2068             :     Page* last_page;
    2069        2056 :     while (delta_pages > 0) {
    2070             :       last_page = anchor()->prev_page();
    2071             :       new_last_page = last_page->prev_page();
    2072        1460 :       new_last_page->set_next_page(anchor());
    2073             :       anchor()->set_prev_page(new_last_page);
    2074             :       heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
    2075        1758 :           last_page);
    2076        1460 :       delta_pages--;
    2077             :     }
    2078             :     AccountUncommitted(delta);
    2079         298 :     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2080             :   }
    2081         298 :   current_capacity_ = new_capacity;
    2082         298 :   return true;
    2083             : }
    2084             : 
    2085      245070 : void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
    2086             :   anchor_.set_owner(this);
    2087      245070 :   anchor_.prev_page()->set_next_page(&anchor_);
    2088             :   anchor_.next_page()->set_prev_page(&anchor_);
    2089             : 
    2090     1790044 :   for (Page* page : *this) {
    2091             :     page->set_owner(this);
    2092      649952 :     page->SetFlags(flags, mask);
    2093      649952 :     if (id_ == kToSpace) {
    2094             :       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
    2095             :       page->SetFlag(MemoryChunk::IN_TO_SPACE);
    2096             :       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2097             :       MarkingState::Internal(page).SetLiveBytes(0);
    2098             :     } else {
    2099             :       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
    2100             :       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    2101             :     }
    2102             :     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
    2103             :            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    2104             :   }
    2105      245070 : }
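                     : 
                     : // Editor's sketch of the masked flag copy that SetFlags(flags, mask) above
                     : // performs, assuming page flags are a plain bit set. The function name is
                     : // illustrative, not the MemoryChunk API.
                     : static intptr_t ExampleCopyMaskedFlags(intptr_t old_flags, intptr_t new_flags,
                     :                                        intptr_t mask) {
                     :   // Bits outside the mask are kept; bits inside it come from new_flags.
                     :   return (old_flags & ~mask) | (new_flags & mask);
                     : }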
    2106             : 
    2107             : 
    2108      268448 : void SemiSpace::Reset() {
    2109             :   DCHECK_NE(anchor_.next_page(), &anchor_);
    2110      268448 :   current_page_ = anchor_.next_page();
    2111      268448 :   pages_used_ = 0;
    2112      268448 : }
    2113             : 
    2114         489 : void SemiSpace::RemovePage(Page* page) {
    2115         489 :   if (current_page_ == page) {
    2116         142 :     current_page_ = page->prev_page();
    2117             :   }
    2118         489 :   page->Unlink();
    2119         489 : }
    2120             : 
    2121         489 : void SemiSpace::PrependPage(Page* page) {
    2122             :   page->SetFlags(current_page()->GetFlags(),
    2123             :                  static_cast<uintptr_t>(Page::kCopyAllFlags));
    2124             :   page->set_owner(this);
    2125         489 :   page->InsertAfter(anchor());
    2126         489 :   pages_used_++;
    2127         489 : }
    2128             : 
    2129      122535 : void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
    2130             :   // We won't be swapping semispaces without data in them.
    2131             :   DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
    2132             :   DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
    2133             : 
    2134      122535 :   intptr_t saved_to_space_flags = to->current_page()->GetFlags();
    2135             : 
    2136             :   // We swap all properties but id_.
    2137             :   std::swap(from->current_capacity_, to->current_capacity_);
    2138             :   std::swap(from->maximum_capacity_, to->maximum_capacity_);
    2139             :   std::swap(from->minimum_capacity_, to->minimum_capacity_);
    2140             :   std::swap(from->age_mark_, to->age_mark_);
    2141             :   std::swap(from->committed_, to->committed_);
    2142      122535 :   std::swap(from->anchor_, to->anchor_);
    2143             :   std::swap(from->current_page_, to->current_page_);
    2144             : 
    2145      122535 :   to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
    2146      122535 :   from->FixPagesFlags(0, 0);
    2147      122535 : }
    2148             : 
    2149      122535 : void SemiSpace::set_age_mark(Address mark) {
    2150             :   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
    2151      122535 :   age_mark_ = mark;
    2152             :   // Mark all pages up to the one containing mark.
    2153      626713 :   for (Page* p : PageRange(space_start(), mark)) {
    2154             :     p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2155             :   }
    2156      122535 : }
    2157             : 
    2158           0 : std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
    2159             :   // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
    2160           0 :   UNREACHABLE();
    2161             :   return std::unique_ptr<ObjectIterator>();
    2162             : }
    2163             : 
    2164             : #ifdef DEBUG
    2165             : void SemiSpace::Print() {}
    2166             : #endif
    2167             : 
    2168             : #ifdef VERIFY_HEAP
    2169             : void SemiSpace::Verify() {
    2170             :   bool is_from_space = (id_ == kFromSpace);
    2171             :   Page* page = anchor_.next_page();
    2172             :   CHECK(anchor_.owner() == this);
    2173             :   while (page != &anchor_) {
    2174             :     CHECK_EQ(page->owner(), this);
    2175             :     CHECK(page->InNewSpace());
    2176             :     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
    2177             :                                         : MemoryChunk::IN_TO_SPACE));
    2178             :     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
    2179             :                                          : MemoryChunk::IN_FROM_SPACE));
    2180             :     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    2181             :     if (!is_from_space) {
    2182             :       // The pointers-from-here-are-interesting flag isn't updated dynamically
    2183             :       // on from-space pages, so it might be out of sync with the marking state.
    2184             :       if (page->heap()->incremental_marking()->IsMarking()) {
    2185             :         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2186             :       } else {
    2187             :         CHECK(
    2188             :             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2189             :       }
    2190             :       // TODO(gc): Check that the live_bytes_count_ field matches the
    2191             :       // black marking on the page (if we make it match in new-space).
    2192             :     }
    2193             :     CHECK_EQ(page->prev_page()->next_page(), page);
    2194             :     page = page->next_page();
    2195             :   }
    2196             : }
    2197             : #endif
    2198             : 
    2199             : #ifdef DEBUG
    2200             : void SemiSpace::AssertValidRange(Address start, Address end) {
    2201             :   // Addresses belong to the same semi-space.
    2202             :   Page* page = Page::FromAllocationAreaAddress(start);
    2203             :   Page* end_page = Page::FromAllocationAreaAddress(end);
    2204             :   SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
    2205             :   CHECK_EQ(space, end_page->owner());
    2206             :   // Start address is before end address, either on the same page,
    2207             :   // or the end address is on a later page in the linked list of
    2208             :   // semi-space pages.
    2209             :   if (page == end_page) {
    2210             :     CHECK_LE(start, end);
    2211             :   } else {
    2212             :     while (page != end_page) {
    2213             :       page = page->next_page();
    2214             :       CHECK_NE(page, space->anchor());
    2215             :     }
    2216             :   }
    2217             : }
    2218             : #endif
    2219             : 
    2220             : 
    2221             : // -----------------------------------------------------------------------------
    2222             : // SemiSpaceIterator implementation.
    2223             : 
    2224       24569 : SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
    2225             :   Initialize(space->bottom(), space->top());
    2226           0 : }
    2227             : 
    2228             : 
    2229           0 : void SemiSpaceIterator::Initialize(Address start, Address end) {
    2230             :   SemiSpace::AssertValidRange(start, end);
    2231       24569 :   current_ = start;
    2232       24569 :   limit_ = end;
    2233           0 : }
    2234             : 
    2235             : #ifdef DEBUG
    2236             : // heap_histograms is shared; always clear it before using it.
    2237             : static void ClearHistograms(Isolate* isolate) {
    2238             : // We reset the name each time, though it hasn't changed.
    2239             : #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
    2240             :   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
    2241             : #undef DEF_TYPE_NAME
    2242             : 
    2243             : #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
    2244             :   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
    2245             : #undef CLEAR_HISTOGRAM
    2246             : 
    2247             :   isolate->js_spill_information()->Clear();
    2248             : }
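                     : 
                     : // Editor's sketch of the X-macro pattern that INSTANCE_TYPE_LIST uses above:
                     : // one list macro drives every per-type expansion, so the type names are
                     : // written exactly once. EXAMPLE_TYPE_LIST and the enum are illustrative,
                     : // not V8 definitions.
                     : #define EXAMPLE_TYPE_LIST(V) V(FOO_TYPE) V(BAR_TYPE)
                     : #define EXAMPLE_DEFINE_ENUM_ENTRY(name) name,
                     : enum ExampleInstanceType {
                     :   EXAMPLE_TYPE_LIST(EXAMPLE_DEFINE_ENUM_ENTRY)  // FOO_TYPE, BAR_TYPE,
                     :   EXAMPLE_LAST_TYPE
                     : };
                     : #undef EXAMPLE_DEFINE_ENUM_ENTRY
                     : #undef EXAMPLE_TYPE_LIST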
    2249             : 
    2250             : static int CollectHistogramInfo(HeapObject* obj) {
    2251             :   Isolate* isolate = obj->GetIsolate();
    2252             :   InstanceType type = obj->map()->instance_type();
    2253             :   DCHECK(0 <= type && type <= LAST_TYPE);
    2254             :   DCHECK(isolate->heap_histograms()[type].name() != NULL);
    2255             :   isolate->heap_histograms()[type].increment_number(1);
    2256             :   isolate->heap_histograms()[type].increment_bytes(obj->Size());
    2257             : 
    2258             :   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    2259             :     JSObject::cast(obj)
    2260             :         ->IncrementSpillStatistics(isolate->js_spill_information());
    2261             :   }
    2262             : 
    2263             :   return obj->Size();
    2264             : }
    2265             : 
    2266             : 
    2267             : static void ReportHistogram(Isolate* isolate, bool print_spill) {
    2268             :   PrintF("\n  Object Histogram:\n");
    2269             :   for (int i = 0; i <= LAST_TYPE; i++) {
    2270             :     if (isolate->heap_histograms()[i].number() > 0) {
    2271             :       PrintF("    %-34s%10d (%10d bytes)\n",
    2272             :              isolate->heap_histograms()[i].name(),
    2273             :              isolate->heap_histograms()[i].number(),
    2274             :              isolate->heap_histograms()[i].bytes());
    2275             :     }
    2276             :   }
    2277             :   PrintF("\n");
    2278             : 
    2279             :   // Summarize string types.
    2280             :   int string_number = 0;
    2281             :   int string_bytes = 0;
    2282             : #define INCREMENT(type, size, name, camel_name)               \
    2283             :   string_number += isolate->heap_histograms()[type].number(); \
    2284             :   string_bytes += isolate->heap_histograms()[type].bytes();
    2285             :   STRING_TYPE_LIST(INCREMENT)
    2286             : #undef INCREMENT
    2287             :   if (string_number > 0) {
    2288             :     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
    2289             :            string_bytes);
    2290             :   }
    2291             : 
    2292             :   if (FLAG_collect_heap_spill_statistics && print_spill) {
    2293             :     isolate->js_spill_information()->Print();
    2294             :   }
    2295             : }
    2296             : #endif  // DEBUG
    2297             : 
    2298             : 
    2299             : // Support for statistics gathering for --heap-stats and --log-gc.
    2300           0 : void NewSpace::ClearHistograms() {
    2301           0 :   for (int i = 0; i <= LAST_TYPE; i++) {
    2302           0 :     allocated_histogram_[i].clear();
    2303           0 :     promoted_histogram_[i].clear();
    2304             :   }
    2305           0 : }
    2306             : 
    2307             : 
    2308             : // Because the copying collector does not touch garbage objects, we iterate
    2309             : // the new space before a collection to get a histogram of allocated objects.
    2310             : // This only happens when the --log-gc flag is set.
    2311           0 : void NewSpace::CollectStatistics() {
    2312             :   ClearHistograms();
    2313             :   SemiSpaceIterator it(this);
    2314           0 :   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
    2315           0 :     RecordAllocation(obj);
    2316           0 : }
    2317             : 
    2318             : 
    2319           0 : static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
    2320             :                                const char* description) {
    2321           0 :   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
    2322             :   // Lump all the string types together.
    2323             :   int string_number = 0;
    2324             :   int string_bytes = 0;
    2325             : #define INCREMENT(type, size, name, camel_name) \
    2326             :   string_number += info[type].number();         \
    2327             :   string_bytes += info[type].bytes();
    2328           0 :   STRING_TYPE_LIST(INCREMENT)
    2329             : #undef INCREMENT
    2330           0 :   if (string_number > 0) {
    2331           0 :     LOG(isolate,
    2332             :         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
    2333             :   }
    2334             : 
    2335             :   // Then do the other types.
    2336           0 :   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    2337           0 :     if (info[i].number() > 0) {
    2338           0 :       LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
    2339             :                                        info[i].bytes()));
    2340             :     }
    2341             :   }
    2342           0 :   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
    2343           0 : }
    2344             : 
    2345             : 
    2346           0 : void NewSpace::ReportStatistics() {
    2347             : #ifdef DEBUG
    2348             :   if (FLAG_heap_stats) {
    2349             :     float pct = static_cast<float>(Available()) / TotalCapacity();
    2350             :     PrintF("  capacity: %" PRIuS ", available: %" PRIuS ", %%%d\n",
    2351             :            TotalCapacity(), Available(), static_cast<int>(pct * 100));
    2352             :     PrintF("\n  Object Histogram:\n");
    2353             :     for (int i = 0; i <= LAST_TYPE; i++) {
    2354             :       if (allocated_histogram_[i].number() > 0) {
    2355             :         PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
    2356             :                allocated_histogram_[i].number(),
    2357             :                allocated_histogram_[i].bytes());
    2358             :       }
    2359             :     }
    2360             :     PrintF("\n");
    2361             :   }
    2362             : #endif  // DEBUG
    2363             : 
    2364           0 :   if (FLAG_log_gc) {
    2365           0 :     Isolate* isolate = heap()->isolate();
    2366           0 :     DoReportStatistics(isolate, allocated_histogram_, "allocated");
    2367           0 :     DoReportStatistics(isolate, promoted_histogram_, "promoted");
    2368             :   }
    2369           0 : }
    2370             : 
    2371             : 
    2372           0 : void NewSpace::RecordAllocation(HeapObject* obj) {
    2373             :   InstanceType type = obj->map()->instance_type();
    2374             :   DCHECK(0 <= type && type <= LAST_TYPE);
    2375           0 :   allocated_histogram_[type].increment_number(1);
    2376           0 :   allocated_histogram_[type].increment_bytes(obj->Size());
    2377           0 : }
    2378             : 
    2379             : 
    2380           0 : void NewSpace::RecordPromotion(HeapObject* obj) {
    2381             :   InstanceType type = obj->map()->instance_type();
    2382             :   DCHECK(0 <= type && type <= LAST_TYPE);
    2383           0 :   promoted_histogram_[type].increment_number(1);
    2384           0 :   promoted_histogram_[type].increment_bytes(obj->Size());
    2385           0 : }
    2386             : 
    2387             : 
    2388           7 : size_t NewSpace::CommittedPhysicalMemory() {
    2389           7 :   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
    2390           7 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2391           7 :   size_t size = to_space_.CommittedPhysicalMemory();
    2392           7 :   if (from_space_.is_committed()) {
    2393           1 :     size += from_space_.CommittedPhysicalMemory();
    2394             :   }
    2395           7 :   return size;
    2396             : }
    2397             : 
    2398             : 
    2399             : // -----------------------------------------------------------------------------
    2400             : // Free lists for old object spaces implementation
    2401             : 
    2402             : 
    2403           0 : void FreeListCategory::Reset() {
    2404             :   set_top(nullptr);
    2405             :   set_prev(nullptr);
    2406             :   set_next(nullptr);
    2407     1375382 :   available_ = 0;
    2408           0 : }
    2409             : 
    2410     1725651 : FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
    2411             :   DCHECK(page()->CanAllocate());
    2412             : 
    2413             :   FreeSpace* node = top();
    2414     1725651 :   if (node == nullptr) return nullptr;
    2415             :   set_top(node->next());
    2416     1633033 :   *node_size = node->Size();
    2417     1633033 :   available_ -= *node_size;
    2418           0 :   return node;
    2419             : }
    2420             : 
    2421       31833 : FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
    2422             :                                                  size_t* node_size) {
    2423             :   DCHECK(page()->CanAllocate());
    2424             : 
    2425             :   FreeSpace* node = PickNodeFromList(node_size);
    2426       31833 :   if ((node != nullptr) && (*node_size < minimum_size)) {
    2427       20556 :     Free(node, *node_size, kLinkCategory);
    2428       20556 :     *node_size = 0;
    2429       20556 :     return nullptr;
    2430             :   }
    2431             :   return node;
    2432             : }
    2433             : 
    2434      786268 : FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
    2435      786268 :                                                  size_t* node_size) {
    2436             :   DCHECK(page()->CanAllocate());
    2437             : 
    2438             :   FreeSpace* prev_non_evac_node = nullptr;
    2439     1578419 :   for (FreeSpace* cur_node = top(); cur_node != nullptr;
    2440             :        cur_node = cur_node->next()) {
    2441      621336 :     size_t size = cur_node->size();
    2442      621336 :     if (size >= minimum_size) {
    2443             :       DCHECK_GE(available_, size);
    2444      615453 :       available_ -= size;
    2445      615453 :       if (cur_node == top()) {
    2446             :         set_top(cur_node->next());
    2447             :       }
    2448      615453 :       if (prev_non_evac_node != nullptr) {
    2449             :         prev_non_evac_node->set_next(cur_node->next());
    2450             :       }
    2451      615453 :       *node_size = size;
    2452      615453 :       return cur_node;
    2453             :     }
    2454             : 
    2455             :     prev_non_evac_node = cur_node;
    2456             :   }
    2457             :   return nullptr;
    2458             : }
    2459             : 
    2460    35106387 : bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
    2461    38104251 :                             FreeMode mode) {
    2462    35106387 :   if (!page()->CanAllocate()) return false;
    2463             : 
    2464             :   free_space->set_next(top());
    2465             :   set_top(free_space);
    2466    35108508 :   available_ += size_in_bytes;
    2467    38104251 :   if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
    2468             :     owner()->AddCategory(this);
    2469             :   }
    2470             :   return true;
    2471             : }
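                     : 
                     : // Editor's sketch of the O(1) LIFO push that Free() above performs on the
                     : // category's singly-linked list. ExampleFreeNode is illustrative, standing
                     : // in for V8's FreeSpace heap object.
                     : struct ExampleFreeNode {
                     :   ExampleFreeNode* next;
                     :   size_t size;
                     : };
                     : static void ExamplePushFreeNode(ExampleFreeNode** top, ExampleFreeNode* node,
                     :                                 size_t* available) {
                     :   node->next = *top;  // the new node points at the old head ...
                     :   *top = node;        // ... and becomes the new head
                     :   *available += node->size;
                     : }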
    2472             : 
    2473             : 
    2474      425193 : void FreeListCategory::RepairFreeList(Heap* heap) {
    2475             :   FreeSpace* n = top();
    2476      425193 :   while (n != NULL) {
    2477             :     Map** map_location = reinterpret_cast<Map**>(n->address());
    2478      121488 :     if (*map_location == NULL) {
    2479      121488 :       *map_location = heap->free_space_map();
    2480             :     } else {
    2481             :       DCHECK(*map_location == heap->free_space_map());
    2482             :     }
    2483             :     n = n->next();
    2484             :   }
    2485           0 : }
    2486             : 
    2487     3089220 : void FreeListCategory::Relink() {
    2488             :   DCHECK(!is_linked());
    2489             :   owner()->AddCategory(this);
    2490     3089220 : }
    2491             : 
    2492      140358 : void FreeListCategory::Invalidate() {
    2493             :   page()->remove_available_in_free_list(available());
    2494             :   Reset();
    2495      140358 :   type_ = kInvalidCategory;
    2496      140358 : }
    2497             : 
    2498      300881 : FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
    2499     1805286 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2500     1805286 :     categories_[i] = nullptr;
    2501             :   }
    2502      300881 :   Reset();
    2503           0 : }
    2504             : 
    2505             : 
    2506      643395 : void FreeList::Reset() {
    2507             :   ForAllFreeListCategories(
    2508             :       [](FreeListCategory* category) { category->Reset(); });
    2509     3860370 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2510     3860370 :     categories_[i] = nullptr;
    2511             :   }
    2512      643395 :   ResetStats();
    2513      643395 : }
    2514             : 
    2515    72753289 : size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
    2516    36720330 :   if (size_in_bytes == 0) return 0;
    2517             : 
    2518             :   owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
    2519    72065918 :                                         ClearRecordedSlots::kNo);
    2520             : 
    2521             :   Page* page = Page::FromAddress(start);
    2522             : 
    2523             :   // Blocks have to be a minimum size to hold free list items.
    2524    36031834 :   if (size_in_bytes < kMinBlockSize) {
    2525             :     page->add_wasted_memory(size_in_bytes);
    2526             :     wasted_bytes_.Increment(size_in_bytes);
    2527      942152 :     return size_in_bytes;
    2528             :   }
    2529             : 
    2530    35089758 :   FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
    2531             :   // Insert other blocks at the head of a free list of the appropriate
    2532             :   // magnitude.
    2533             :   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
    2534    35089758 :   if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
    2535             :     page->add_available_in_free_list(size_in_bytes);
    2536             :   }
    2537             :   DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
    2538             :   return 0;
    2539             : }
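                     : 
                     : // Editor's sketch of bucketing a freed block by size class, as the
                     : // SelectFreeListCategoryType call above does. The thresholds below are
                     : // illustrative; V8's real category boundaries are defined in
                     : // src/heap/spaces.h.
                     : enum ExampleCategory { kExSmall, kExMedium, kExLarge, kExHuge };
                     : static ExampleCategory ExampleSelectCategory(size_t size_in_bytes) {
                     :   if (size_in_bytes <= 255) return kExSmall;
                     :   if (size_in_bytes <= 2047) return kExMedium;
                     :   if (size_in_bytes <= 16383) return kExLarge;
                     :   return kExHuge;  // everything bigger lands on the huge list
                     : }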
    2540             : 
    2541     3474996 : FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
    2542             :   FreeListCategoryIterator it(this, type);
    2543             :   FreeSpace* node = nullptr;
    2544     7039989 :   while (it.HasNext()) {
    2545             :     FreeListCategory* current = it.Next();
    2546             :     node = current->PickNodeFromList(node_size);
    2547     1693818 :     if (node != nullptr) {
    2548             :       Page::FromAddress(node->address())
    2549     1603821 :           ->remove_available_in_free_list(*node_size);
    2550             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2551     1603822 :       return node;
    2552             :     }
    2553             :     RemoveCategory(current);
    2554             :   }
    2555             :   return node;
    2556             : }
    2557             : 
    2558      312449 : FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
    2559             :                                    size_t minimum_size) {
    2560      312449 :   if (categories_[type] == nullptr) return nullptr;
    2561             :   FreeSpace* node =
    2562       31833 :       categories_[type]->TryPickNodeFromList(minimum_size, node_size);
    2563       31833 :   if (node != nullptr) {
    2564             :     Page::FromAddress(node->address())
    2565        8655 :         ->remove_available_in_free_list(*node_size);
    2566             :     DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2567             :   }
    2568       31833 :   return node;
    2569             : }
    2570             : 
    2571     1221948 : FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
    2572             :                                          size_t* node_size,
    2573             :                                          size_t minimum_size) {
    2574             :   FreeListCategoryIterator it(this, type);
    2575             :   FreeSpace* node = nullptr;
    2576     2614711 :   while (it.HasNext()) {
    2577             :     FreeListCategory* current = it.Next();
    2578      786268 :     node = current->SearchForNodeInList(minimum_size, node_size);
    2579      786268 :     if (node != nullptr) {
    2580             :       Page::FromAddress(node->address())
    2581      615453 :           ->remove_available_in_free_list(*node_size);
    2582             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2583      615453 :       return node;
    2584             :     }
    2585      170815 :     if (current->is_empty()) {
    2586             :       RemoveCategory(current);
    2587             :     }
    2588             :   }
    2589             :   return node;
    2590             : }
    2591             : 
    2592     2825778 : FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
    2593             :   FreeSpace* node = nullptr;
    2594             : 
    2595             :   // First try the allocation fast path: try to allocate the minimum element
    2596             :   // size of a free list category. This operation is constant time.
    2597             :   FreeListCategoryType type =
    2598             :       SelectFastAllocationFreeListCategoryType(size_in_bytes);
    2599     4696952 :   for (int i = type; i < kHuge; i++) {
    2600     3475000 :     node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
    2601     3474997 :     if (node != nullptr) return node;
    2602             :   }
    2603             : 
    2604             :   // Next search the huge list for free list nodes. This takes linear time in
    2605             :   // the number of huge elements.
    2606     1221952 :   node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
    2607     1221952 :   if (node != nullptr) {
    2608             :     DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2609             :     return node;
    2610             :   }
    2611             : 
    2612             :   // We need a huge block of memory, but we didn't find anything in the huge
    2613             :   // list.
    2614      606499 :   if (type == kHuge) return nullptr;
    2615             : 
    2616             :   // Now search the best fitting free list for a node that has at least the
    2617             :   // requested size.
    2618             :   type = SelectFreeListCategoryType(size_in_bytes);
    2619      312446 :   node = TryFindNodeIn(type, node_size, size_in_bytes);
    2620             : 
    2621             :   DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2622      312452 :   return node;
    2623             : }
    2624             : 
    2625             : // Allocation on the old space free list.  If it succeeds then a new linear
    2626             : // allocation space has been set up with the top and limit of the space.  If
    2627             : // the allocation fails then NULL is returned, and the caller can perform a GC
    2628             : // or allocate a new page before retrying.
    2629     2825774 : HeapObject* FreeList::Allocate(size_t size_in_bytes) {
    2630             :   DCHECK(size_in_bytes <= kMaxBlockSize);
    2631             :   DCHECK(IsAligned(size_in_bytes, kPointerSize));
    2632             :   DCHECK_LE(owner_->top(), owner_->limit());
    2633             : #ifdef DEBUG
    2634             :   if (owner_->top() != owner_->limit()) {
    2635             :     DCHECK_EQ(Page::FromAddress(owner_->top()),
    2636             :               Page::FromAddress(owner_->limit() - 1));
    2637             :   }
    2638             : #endif
    2639             :   // Don't free list allocate if there is linear space available.
    2640             :   DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
    2641             :             size_in_bytes);
    2642             : 
    2643             :   // Mark the old linear allocation area with a free space map so it can be
    2644             :   // skipped when scanning the heap.  This also puts it back in the free list
    2645             :   // if it is big enough.
    2646     2825774 :   owner_->EmptyAllocationInfo();
    2647             : 
    2648             :   owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    2649     5053708 :       Heap::kNoGCFlags, kNoGCCallbackFlags);
    2650             : 
    2651     2825776 :   size_t new_node_size = 0;
    2652     2825776 :   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
    2653     2825780 :   if (new_node == nullptr) return nullptr;
    2654             : 
    2655             :   DCHECK_GE(new_node_size, size_in_bytes);
    2656     2227930 :   size_t bytes_left = new_node_size - size_in_bytes;
    2657             : 
    2658             : #ifdef DEBUG
    2659             :   for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
    2660             :     reinterpret_cast<Object**>(new_node->address())[i] =
    2661             :         Smi::FromInt(kCodeZapValue);
    2662             :   }
    2663             : #endif
    2664             : 
    2665             :   // The old-space-step might have finished sweeping and restarted marking.
    2666             :   // Verify that it did not turn the page of the new node into an evacuation
    2667             :   // candidate.
    2668             :   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
    2669             : 
    2670             :   const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
    2671             : 
    2672             :   // Memory in the linear allocation area is counted as allocated.  We may free
    2673             :   // a little of this again immediately - see below.
    2674     2227930 :   owner_->AccountAllocatedBytes(new_node_size);
    2675             : 
    2676     2227930 :   if (owner_->heap()->inline_allocation_disabled()) {
    2677             :     // Keep the linear allocation area empty if requested to do so, just
    2678             :     // return area back to the free list instead.
    2679       90589 :     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
    2680             :     owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
    2681       90589 :                               new_node->address() + size_in_bytes);
    2682     2568850 :   } else if (bytes_left > kThreshold &&
    2683     2144052 :              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
    2684             :              FLAG_incremental_marking) {
    2685        6711 :     size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    2686             :     // We don't want to give too large linear areas to the allocator while
    2687             :     // incremental marking is going on, because we won't check again whether
    2688             :     // we want to do another increment until the linear area is used up.
    2689             :     DCHECK_GE(new_node_size, size_in_bytes + linear_size);
    2690        6711 :     owner_->Free(new_node->address() + size_in_bytes + linear_size,
    2691       13422 :                  new_node_size - size_in_bytes - linear_size);
    2692             :     owner_->SetAllocationInfo(
    2693             :         new_node->address() + size_in_bytes,
    2694        6711 :         new_node->address() + size_in_bytes + linear_size);
    2695             :   } else {
    2696             :     // Normally we give the rest of the node to the allocator as its new
    2697             :     // linear allocation area.
    2698             :     owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
    2699     2130630 :                               new_node->address() + new_node_size);
    2700             :   }
    2701             : 
    2702     2227929 :   return new_node;
    2703             : }
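                     : 
                     : // Editor's sketch of the bump-pointer fast path that the linear allocation
                     : // area set up by Allocate() above enables: allocation is a bounds check and
                     : // a pointer increment until [top, limit) is exhausted. Names are
                     : // illustrative, not V8's inline allocation path.
                     : static char* ExampleBumpAllocate(char** top, char* limit, size_t size) {
                     :   if (static_cast<size_t>(limit - *top) < size) return nullptr;  // slow path
                     :   char* result = *top;
                     :   *top += size;  // advance the bump pointer; no free-list work needed
                     :   return result;
                     : }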
    2704             : 
    2705       23393 : size_t FreeList::EvictFreeListItems(Page* page) {
    2706       23393 :   size_t sum = 0;
    2707             :   page->ForAllFreeListCategories(
    2708      140358 :       [this, &sum](FreeListCategory* category) {
    2709             :         DCHECK_EQ(this, category->owner());
    2710      140358 :         sum += category->available();
    2711      140358 :         RemoveCategory(category);
    2712      140358 :         category->Invalidate();
    2713      140358 :       });
    2714       23393 :   return sum;
    2715             : }
    2716             : 
    2717           0 : bool FreeList::ContainsPageFreeListItems(Page* page) {
    2718           0 :   bool contained = false;
    2719             :   page->ForAllFreeListCategories(
    2720           0 :       [this, &contained](FreeListCategory* category) {
    2721           0 :         if (category->owner() == this && category->is_linked()) {
    2722           0 :           contained = true;
    2723             :         }
    2724           0 :       });
    2725           0 :   return contained;
    2726             : }
    2727             : 
    2728           0 : void FreeList::RepairLists(Heap* heap) {
    2729             :   ForAllFreeListCategories(
    2730      182217 :       [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
    2731           0 : }
    2732             : 
    2733           0 : bool FreeList::AddCategory(FreeListCategory* category) {
    2734     4231493 :   FreeListCategoryType type = category->type_;
    2735     4231493 :   FreeListCategory* top = categories_[type];
    2736             : 
    2737     4231493 :   if (category->is_empty()) return false;
    2738     2410963 :   if (top == category) return false;
    2739             : 
    2740             :   // Common double-linked list insertion.
    2741     1956006 :   if (top != nullptr) {
    2742             :     top->set_prev(category);
    2743             :   }
    2744             :   category->set_next(top);
    2745     1956006 :   categories_[type] = category;
    2746           0 :   return true;
    2747             : }
    2748             : 
    2749     1670246 : void FreeList::RemoveCategory(FreeListCategory* category) {
    2750      606753 :   FreeListCategoryType type = category->type_;
    2751      606753 :   FreeListCategory* top = categories_[type];
    2752             : 
    2753             :   // Common double-linked list removal.
    2754      606753 :   if (top == category) {
    2755      385962 :     categories_[type] = category->next();
    2756             :   }
    2757      606753 :   if (category->prev() != nullptr) {
    2758             :     category->prev()->set_next(category->next());
    2759             :   }
    2760      606753 :   if (category->next() != nullptr) {
    2761             :     category->next()->set_prev(category->prev());
    2762             :   }
    2763             :   category->set_next(nullptr);
    2764             :   category->set_prev(nullptr);
    2765          12 : }
    2766             : 
    2767           0 : void FreeList::PrintCategories(FreeListCategoryType type) {
    2768             :   FreeListCategoryIterator it(this, type);
    2769             :   PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
    2770           0 :          static_cast<void*>(categories_[type]), type);
    2771           0 :   while (it.HasNext()) {
    2772             :     FreeListCategory* current = it.Next();
    2773           0 :     PrintF("%p -> ", static_cast<void*>(current));
    2774             :   }
    2775           0 :   PrintF("null\n");
    2776           0 : }
    2777             : 
    2778             : 
    2779             : #ifdef DEBUG
    2780             : size_t FreeListCategory::SumFreeList() {
    2781             :   size_t sum = 0;
    2782             :   FreeSpace* cur = top();
    2783             :   while (cur != NULL) {
    2784             :     DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
    2785             :     sum += cur->nobarrier_size();
    2786             :     cur = cur->next();
    2787             :   }
    2788             :   return sum;
    2789             : }
    2790             : 
    2791             : int FreeListCategory::FreeListLength() {
    2792             :   int length = 0;
    2793             :   FreeSpace* cur = top();
    2794             :   while (cur != NULL) {
    2795             :     length++;
    2796             :     cur = cur->next();
    2797             :     if (length == kVeryLongFreeList) return length;
    2798             :   }
    2799             :   return length;
    2800             : }
    2801             : 
    2802             : bool FreeList::IsVeryLong() {
    2803             :   int len = 0;
    2804             :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2805             :     FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
    2806             :     while (it.HasNext()) {
    2807             :       len += it.Next()->FreeListLength();
    2808             :       if (len >= FreeListCategory::kVeryLongFreeList) return true;
    2809             :     }
    2810             :   }
    2811             :   return false;
    2812             : }
    2813             : 
    2814             : 
    2815             : // This can take a very long time because it is linear in the number of entries
    2816             : // on the free list, so it should not be called if FreeListLength returns
    2817             : // kVeryLongFreeList.
    2818             : size_t FreeList::SumFreeLists() {
    2819             :   size_t sum = 0;
    2820             :   ForAllFreeListCategories(
    2821             :       [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
    2822             :   return sum;
    2823             : }
    2824             : #endif
    2825             : 
    2826             : 
    2827             : // -----------------------------------------------------------------------------
    2828             : // OldSpace implementation
    2829             : 
    2830      160038 : void PagedSpace::PrepareForMarkCompact() {
    2831             :   // We don't have a linear allocation area while sweeping.  It will be restored
    2832             :   // on the first allocation after the sweep.
    2833      160038 :   EmptyAllocationInfo();
    2834             : 
    2835             :   // Clear the free list before a full GC---it will be rebuilt afterward.
    2836      160038 :   free_list_.Reset();
    2837      160038 : }
    2838             : 
    2839    12572565 : size_t PagedSpace::SizeOfObjects() {
    2840    12572565 :   CHECK_GE(limit(), top());
    2841             :   DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
    2842    37717695 :   return Size() - (limit() - top());
    2843             : }
    2844             : 
    2845             : 
    2846             : // Once the heap has been booted, a map exists that represents free space
    2847             : // on the heap.  If there was already a free list then the elements on it
    2848             : // were created with the wrong FreeSpaceMap (normally NULL), so we need to
    2849             : // fix them.
    2850      182217 : void PagedSpace::RepairFreeListsAfterDeserialization() {
    2851      182241 :   free_list_.RepairLists(heap());
    2852             :   // Each page may have a small free space that is not tracked by a free list.
    2853             :   // Those free spaces still contain null as their map pointer.
    2854             :   // Overwrite them with new fillers.
    2855      971896 :   for (Page* page : *this) {
    2856      303731 :     int size = static_cast<int>(page->wasted_memory());
    2857      303731 :     if (size == 0) {
    2858             :       // If there is no wasted memory then all free space is in the free list.
    2859             :       continue;
    2860             :     }
    2861          24 :     Address start = page->HighWaterMark();
    2862             :     Address end = page->area_end();
    2863          24 :     CHECK_EQ(size, static_cast<int>(end - start));
    2864          24 :     heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
    2865             :   }
    2866      182217 : }
    2867             : 
    2868        1548 : HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
    2869        1548 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    2870        1548 :   if (collector->sweeping_in_progress()) {
    2871             :     // Wait for the sweeper threads here and complete the sweeping phase.
    2872          65 :     collector->EnsureSweepingCompleted();
    2873             : 
    2874             :     // After waiting for the sweeper threads, there may be new free-list
    2875             :     // entries.
    2876          65 :     return free_list_.Allocate(size_in_bytes);
    2877             :   }
    2878             :   return nullptr;
    2879             : }
    2880             : 
    2881        3815 : HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
    2882        3815 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    2883        3815 :   if (collector->sweeping_in_progress()) {
    2884        3815 :     collector->SweepAndRefill(this);
    2885        3815 :     return free_list_.Allocate(size_in_bytes);
    2886             :   }
    2887             :   return nullptr;
    2888             : }
    2889             : 
    2890      555198 : HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
    2891             :   DCHECK_GE(size_in_bytes, 0);
    2892             :   const int kMaxPagesToSweep = 1;
    2893             : 
    2894             :   // Allocation in this space has failed.
    2895             : 
    2896     1104337 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    2897             :   // Sweeping is still in progress.
    2898      555198 :   if (collector->sweeping_in_progress()) {
    2899       74575 :     if (FLAG_concurrent_sweeping && !is_local() &&
    2900       19642 :         !collector->sweeper().AreSweeperTasksRunning()) {
    2901       15089 :       collector->EnsureSweepingCompleted();
    2902             :     }
    2903             : 
    2904             :     // First try to refill the free-list; concurrent sweeper threads
    2905             :     // may have freed some objects in the meantime.
    2906       54920 :     RefillFreeList();
    2907             : 
    2908             :     // Retry the free list allocation.
    2909             :     HeapObject* object =
    2910       54937 :         free_list_.Allocate(static_cast<size_t>(size_in_bytes));
    2911       54937 :     if (object != NULL) return object;
    2912             : 
    2913             :     // If sweeping is still in progress, try to sweep pages on the main thread.
    2914       38549 :     int max_freed = collector->sweeper().ParallelSweepSpace(
    2915       38549 :         identity(), size_in_bytes, kMaxPagesToSweep);
    2916       38549 :     RefillFreeList();
    2917       38549 :     if (max_freed >= size_in_bytes) {
    2918       28441 :       object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
    2919       28441 :       if (object != nullptr) return object;
    2920             :     }
    2921             :   }
    2922             : 
    2923      510590 :   if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
    2924             :     DCHECK((CountTotalPages() > 1) ||
    2925             :            (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    2926      505243 :     return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
    2927             :   }
    2928             : 
    2929             :   // If sweeper threads are active, wait for them at that point and steal
    2930             :   // elements from their free-lists. Allocation may still fail here, which
    2931             :   // would indicate that there is not enough memory for the given allocation.
    2932        5363 :   return SweepAndRetryAllocation(size_in_bytes);
    2933             : }
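                     : 
                     : // Editor's summary sketch of the slow-path ordering in SlowAllocateRaw()
                     : // above, with illustrative callables standing in for the V8 calls (not a
                     : // real API): retry the free list, sweep inline, then expand the space.
                     : template <typename TryAlloc, typename SweepStep, typename ExpandStep>
                     : static void* ExampleSlowPath(TryAlloc try_alloc, SweepStep sweep_one_page,
                     :                              ExpandStep expand) {
                     :   if (void* p = try_alloc()) return p;  // concurrent sweeping may have freed
                     :   sweep_one_page();                     // sweep a page on the main thread
                     :   if (void* p = try_alloc()) return p;
                     :   if (expand()) return try_alloc();     // add a fresh page to the space
                     :   return nullptr;                       // caller must GC and retry
                     : }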
    2934             : 
    2935             : #ifdef DEBUG
    2936             : void PagedSpace::ReportStatistics() {
    2937             :   int pct = static_cast<int>(Available() * 100 / Capacity());
    2938             :   PrintF("  capacity: %" PRIuS ", waste: %" PRIuS
    2939             :          ", available: %" PRIuS ", %%%d\n",
    2940             :          Capacity(), Waste(), Available(), pct);
    2941             : 
    2942             :   heap()->mark_compact_collector()->EnsureSweepingCompleted();
    2943             :   ClearHistograms(heap()->isolate());
    2944             :   HeapObjectIterator obj_it(this);
    2945             :   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    2946             :     CollectHistogramInfo(obj);
    2947             :   ReportHistogram(heap()->isolate(), true);
    2948             : }
    2949             : #endif
    2950             : 
    2951             : 
    2952             : // -----------------------------------------------------------------------------
    2953             : // MapSpace implementation
    2954             : 
    2955             : #ifdef VERIFY_HEAP
    2956             : void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
    2957             : #endif
    2958             : 
    2959        1456 : Address LargePage::GetAddressToShrink() {
    2960        1456 :   HeapObject* object = GetObject();
    2961        1456 :   if (executable() == EXECUTABLE) {
    2962             :     return 0;
    2963             :   }
    2964        1440 :   size_t used_size = RoundUp((object->address() - address()) + object->Size(),
    2965        2880 :                              MemoryAllocator::GetCommitPageSize());
    2966        1440 :   if (used_size < CommittedPhysicalMemory()) {
    2967          37 :     return address() + used_size;
    2968             :   }
    2969             :   return 0;
    2970             : }
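                     : 
                     : // Editor's sketch of the shrink computation above: the live object's end
                     : // offset is rounded up to the OS commit granularity, and any committed tail
                     : // beyond that can be released. Assumes a power-of-two granularity; names
                     : // are illustrative.
                     : static size_t ExampleShrinkableOffset(size_t object_end_offset,
                     :                                       size_t commit_page_size,
                     :                                       size_t committed_size) {
                     :   size_t used = (object_end_offset + commit_page_size - 1) &
                     :                 ~(commit_page_size - 1);  // RoundUp to the granularity
                     :   return used < committed_size ? used : 0;  // 0 means nothing to shrink
                     : }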
    2971             : 
    2972          37 : void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
    2973             :   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
    2974         111 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    2975             :   RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
    2976          37 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    2977          37 :   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
    2978          37 :   RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
    2979          37 : }
    2980             : 
    2981             : // -----------------------------------------------------------------------------
    2982             : // LargeObjectIterator
    2983             : 
    2984       81725 : LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
    2985       80785 :   current_ = space->first_page_;
    2986         940 : }
    2987             : 
    2988             : 
    2989       82581 : HeapObject* LargeObjectIterator::Next() {
    2990       82581 :   if (current_ == NULL) return NULL;
    2991             : 
    2992             :   HeapObject* object = current_->GetObject();
    2993        1796 :   current_ = current_->next_page();
    2994        1796 :   return object;
    2995             : }
    2996             : 
    2997             : 
    2998             : // -----------------------------------------------------------------------------
    2999             : // LargeObjectSpace
    3000             : 
    3001       60782 : LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    3002             :     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
    3003             :       first_page_(NULL),
    3004             :       size_(0),
    3005             :       page_count_(0),
    3006             :       objects_size_(0),
    3007      121564 :       chunk_map_(1024) {}
    3008             : 
    3009      296425 : LargeObjectSpace::~LargeObjectSpace() {}
    3010             : 
    3011             : 
    3012       60782 : bool LargeObjectSpace::SetUp() {
    3013      120067 :   first_page_ = NULL;
    3014      120067 :   size_ = 0;
    3015      120067 :   page_count_ = 0;
    3016      120067 :   objects_size_ = 0;
    3017             :   chunk_map_.Clear();
    3018       60782 :   return true;
    3019             : }
    3020             : 
    3021             : 
    3022       59285 : void LargeObjectSpace::TearDown() {
    3023      132466 :   while (first_page_ != NULL) {
    3024             :     LargePage* page = first_page_;
    3025       13896 :     first_page_ = first_page_->next_page();
    3026       27792 :     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
    3027       13896 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
    3028             :   }
    3029             :   SetUp();
    3030       59285 : }
    3031             : 
    3032             : 
    3033       21039 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
    3034             :                                                Executability executable) {
    3035             :   // Check if we want to force a GC before growing the old space further.
    3036             :   // If so, fail the allocation.
    3037      146677 :   if (!heap()->CanExpandOldGeneration(object_size) ||
    3038       20991 :       !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    3039             :     return AllocationResult::Retry(identity());
    3040             :   }
    3041             : 
    3042             :   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
    3043       20902 :       object_size, this, executable);
    3044       20902 :   if (page == NULL) return AllocationResult::Retry(identity());
    3045             :   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
    3046             : 
    3047       20902 :   size_ += static_cast<int>(page->size());
    3048             :   AccountCommitted(page->size());
    3049       20902 :   objects_size_ += object_size;
    3050       20902 :   page_count_++;
    3051       20902 :   page->set_next_page(first_page_);
    3052       20902 :   first_page_ = page;
    3053             : 
    3054       20902 :   InsertChunkMapEntries(page);
    3055             : 
    3056       20902 :   HeapObject* object = page->GetObject();
    3057             : 
    3058             :   if (Heap::ShouldZapGarbage()) {
    3059             :     // Make the object consistent so the heap can be verified in OldSpaceStep.
    3060             :     // We only need to do this in debug builds or if verify_heap is on.
    3061             :     reinterpret_cast<Object**>(object->address())[0] =
    3062             :         heap()->fixed_array_map();
    3063             :     reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
    3064             :   }
    3065             : 
    3066             :   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
    3067       20902 :                                                             kNoGCCallbackFlags);
    3068       20902 :   AllocationStep(object->address(), object_size);
    3069             : 
    3070             :   heap()->CreateFillerObjectAt(object->address(), object_size,
    3071       20902 :                                ClearRecordedSlots::kNo);
    3072             : 
    3073       20902 :   if (heap()->incremental_marking()->black_allocation()) {
    3074             :     ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
    3075             :   }
    3076       20902 :   return object;
    3077             : }
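                     : 
                     : // Caller-side sketch (assumed pattern, following the usual AllocationResult
                     : // protocol):
                     : //   AllocationResult result = lo_space->AllocateRaw(size, NOT_EXECUTABLE);
                     : //   HeapObject* obj = nullptr;
                     : //   if (!result.To(&obj)) {
                     : //     // Retry was signaled: run a GC and try again, or report OOM.
                     : //   }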
    3078             : 
    3079             : 
    3080           7 : size_t LargeObjectSpace::CommittedPhysicalMemory() {
    3081             :   // On a platform that provides lazy committing of memory, we over-account
    3082             :   // the memory that is actually committed. There is currently no easy way to
    3083             :   // support precise accounting of committed memory in large object space.
    3084           7 :   return CommittedMemory();
    3085             : }
    3086             : 
    3087             : 
    3088             : // GC support
    3089           7 : Object* LargeObjectSpace::FindObject(Address a) {
    3090           7 :   LargePage* page = FindPage(a);
    3091           7 :   if (page != NULL) {
    3092           7 :     return page->GetObject();
    3093             :   }
    3094             :   return Smi::kZero;  // Signals that the object was not found.
    3095             : }
    3096             : 
    3097    53131457 : LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
    3098    53131457 :   base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
    3099   106262914 :   return FindPage(a);
    3100             : }
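                     : 
                     : // Note: this takes the same chunk_map_mutex_ as InsertChunkMapEntries
                     : // below, so lookups from other threads (presumably concurrent marking or
                     : // sweeping) stay safe while the main thread registers new large pages.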
    3101             : 
    3102    58794477 : LargePage* LargeObjectSpace::FindPage(Address a) {
    3103    58794477 :   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
    3104             :   base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
    3105    58794477 :                                               static_cast<uint32_t>(key));
    3106    58794479 :   if (e != NULL) {
    3107             :     DCHECK(e->value != NULL);
    3108    53131480 :     LargePage* page = reinterpret_cast<LargePage*>(e->value);
    3109             :     DCHECK(LargePage::IsValid(page));
    3110    53131480 :     if (page->Contains(a)) {
    3111    53131480 :       return page;
    3112             :     }
    3113             :   }
    3114             :   return NULL;
    3115             : }
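                     : 
                     : // Key-derivation sketch (the alignment value is illustrative): with
                     : // MemoryChunk::kAlignment == 512 KB, every address in
                     : // [0x40000000, 0x40080000) yields key 0x40000000 / 0x80000 = 0x800, so any
                     : // interior pointer into a large object resolves to the same chunk-map entry.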
    3116             : 
    3117             : 
    3118       55276 : void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
    3119             :   LargeObjectIterator it(this);
    3120       57057 :   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    3121        1781 :     if (ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj))) {
    3122             :       Marking::MarkWhite(
    3123             :           ObjectMarking::MarkBitFrom(obj, MarkingState::Internal(obj)));
    3124             :       MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
    3125             :       chunk->ResetProgressBar();
    3126             :       MarkingState::Internal(chunk).SetLiveBytes(0);
    3127             :     }
    3128             :     DCHECK(ObjectMarking::IsWhite(obj, MarkingState::Internal(obj)));
    3129             :   }
    3130       55276 : }
    3131             : 
    3132       20902 : void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
    3133             :   // Register all MemoryChunk::kAlignment-aligned chunks covered by
    3134             :   // this large page in the chunk map.
    3135       20902 :   uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
    3136       20902 :   uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
    3137       20902 :                     MemoryChunk::kAlignment;
    3138             :   // The chunk map may be accessed concurrently, so we have to take the lock
    3139             :   // here.
    3140       20902 :   base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
    3141      110764 :   for (uintptr_t key = start; key <= limit; key++) {
    3142             :     base::HashMap::Entry* entry = chunk_map_.InsertNew(
    3143       89862 :         reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
    3144             :     DCHECK(entry != NULL);
    3145       89862 :     entry->value = page;
    3146             :   }
    3147       20902 : }
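                     : 
                     : // Worked example (hypothetical numbers, 512 KB alignment): a large page at
                     : // 0x40000000 with size() == 0x100001 covers
                     : //   start = 0x40000000 / 0x80000                  = 0x800
                     : //   limit = (0x40000000 + 0x100001 - 1) / 0x80000 = 0x802
                     : // so keys 0x800, 0x801 and 0x802 all map back to this page.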
    3148             : 
    3149           0 : void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
    3150        6867 :   RemoveChunkMapEntries(page, page->address());
    3151           0 : }
    3152             : 
    3153        6904 : void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
    3154             :                                              Address free_start) {
    3155             :   uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start),
    3156        6904 :                             MemoryChunk::kAlignment) /
    3157        6904 :                     MemoryChunk::kAlignment;
    3158        6904 :   uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
    3159        6904 :                     MemoryChunk::kAlignment;
    3160       38861 :   for (uintptr_t key = start; key <= limit; key++) {
    3161       31957 :     chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
    3162             :   }
    3163        6904 : }
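                     : 
                     : // Note the RoundUp on start: the aligned chunk containing free_start keeps
                     : // its entry, since the surviving object may still end inside it. E.g.
                     : // (hypothetical, 512 KB alignment), free_start == 0x40090000 rounds up to
                     : // start == 0x802, so keys 0x800 and 0x801 stay mapped while 0x802 and
                     : // above are removed.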
    3164             : 
    3165       53346 : void LargeObjectSpace::FreeUnmarkedObjects() {
    3166             :   LargePage* previous = NULL;
    3167       53346 :   LargePage* current = first_page_;
    3168      115015 :   while (current != NULL) {
    3169        8323 :     HeapObject* object = current->GetObject();
    3170             :     DCHECK(!ObjectMarking::IsGrey(object, MarkingState::Internal(object)));
    3171        8323 :     if (ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
    3172             :       Address free_start;
    3173        1456 :       if ((free_start = current->GetAddressToShrink()) != 0) {
    3174             :         // TODO(hpayer): Perform partial free concurrently.
    3175          37 :         current->ClearOutOfLiveRangeSlots(free_start);
    3176          37 :         RemoveChunkMapEntries(current, free_start);
    3177        6904 :         heap()->memory_allocator()->PartialFreeMemory(current, free_start);
    3178             :       }
    3179             :       previous = current;
    3180             :       current = current->next_page();
    3181             :     } else {
    3182             :       LargePage* page = current;
    3183             :       // Cut the chunk out of the chunk list.
    3184             :       current = current->next_page();
    3185        6867 :       if (previous == NULL) {
    3186        1566 :         first_page_ = current;
    3187             :       } else {
    3188             :         previous->set_next_page(current);
    3189             :       }
    3190             : 
    3191             :       // Free the chunk.
    3192        6867 :       size_ -= static_cast<int>(page->size());
    3193             :       AccountUncommitted(page->size());
    3194        6867 :       objects_size_ -= object->Size();
    3195        6867 :       page_count_--;
    3196             : 
    3197             :       RemoveChunkMapEntries(page);
    3198        6867 :       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    3199             :     }
    3200             :   }
    3201       53346 : }
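                     : 
                     : // Summary of the two cases above: black (live) objects may at most shrink,
                     : // via the partial-free path, while unmarked pages are unlinked with the
                     : // usual previous/current list surgery and handed to the memory allocator's
                     : // pre-free queue.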
    3202             : 
    3203             : 
    3204    61471289 : bool LargeObjectSpace::Contains(HeapObject* object) {
    3205    61471289 :   Address address = object->address();
    3206             :   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
    3207             : 
    3208    61471289 :   bool owned = (chunk->owner() == this);
    3209             : 
    3210             :   SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
    3211             : 
    3212    61471291 :   return owned;
    3213             : }
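                     : 
                     : // Note: this relies on object->address() lying in the page's first
                     : // kAlignment-aligned chunk (large objects start at area_start()), so the
                     : // mask in MemoryChunk::FromAddress recovers the right header; arbitrary
                     : // interior pointers must go through FindPage instead.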
    3214             : 
    3215       24569 : std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
    3216       24569 :   return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
    3217             : }
    3218             : 
    3219             : #ifdef VERIFY_HEAP
    3220             : // We do not assume that the large object iterator works, because it depends
    3221             : // on the invariants we are checking during verification.
    3222             : void LargeObjectSpace::Verify() {
    3223             :   for (LargePage* chunk = first_page_; chunk != NULL;
    3224             :        chunk = chunk->next_page()) {
    3225             :     // Each chunk contains an object that starts at the large object page's
    3226             :     // object area start.
    3227             :     HeapObject* object = chunk->GetObject();
    3228             :     Page* page = Page::FromAddress(object->address());
    3229             :     CHECK(object->address() == page->area_start());
    3230             : 
    3231             :     // The first word should be a map, and we expect all map pointers to be
    3232             :     // in map space.
    3233             :     Map* map = object->map();
    3234             :     CHECK(map->IsMap());
    3235             :     CHECK(heap()->map_space()->Contains(map));
    3236             : 
    3237             :     // We have only code, sequential strings, external strings
    3238             :     // (sequential strings that have been morphed into external
    3239             :     // strings), thin strings (sequential strings that have been
    3240             :     // morphed into thin strings), fixed arrays, fixed double arrays,
    3241             :     // byte arrays, and free space (right after allocation) in the
    3242             :     // large object space.
    3243             :     CHECK(object->IsAbstractCode() || object->IsSeqString() ||
    3244             :           object->IsExternalString() || object->IsThinString() ||
    3245             :           object->IsFixedArray() || object->IsFixedDoubleArray() ||
    3246             :           object->IsByteArray() || object->IsFreeSpace());
    3247             : 
    3248             :     // The object itself should look OK.
    3249             :     object->ObjectVerify();
    3250             : 
    3251             :     // Byte arrays and strings don't have interior pointers.
    3252             :     if (object->IsAbstractCode()) {
    3253             :       VerifyPointersVisitor code_visitor;
    3254             :       object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
    3255             :     } else if (object->IsFixedArray()) {
    3256             :       FixedArray* array = FixedArray::cast(object);
    3257             :       for (int j = 0; j < array->length(); j++) {
    3258             :         Object* element = array->get(j);
    3259             :         if (element->IsHeapObject()) {
    3260             :           HeapObject* element_object = HeapObject::cast(element);
    3261             :           CHECK(heap()->Contains(element_object));
    3262             :           CHECK(element_object->map()->IsMap());
    3263             :         }
    3264             :       }
    3265             :     }
    3266             :   }
    3267             : }
    3268             : #endif
    3269             : 
    3270             : #ifdef DEBUG
    3271             : void LargeObjectSpace::Print() {
    3272             :   OFStream os(stdout);
    3273             :   LargeObjectIterator it(this);
    3274             :   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    3275             :     obj->Print(os);
    3276             :   }
    3277             : }
    3278             : 
    3279             : 
    3280             : void LargeObjectSpace::ReportStatistics() {
    3281             :   PrintF("  size: %" PRIuS "\n", size_);
    3282             :   int num_objects = 0;
    3283             :   ClearHistograms(heap()->isolate());
    3284             :   LargeObjectIterator it(this);
    3285             :   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    3286             :     num_objects++;
    3287             :     CollectHistogramInfo(obj);
    3288             :   }
    3289             : 
    3290             :   PrintF(
    3291             :       "  number of objects %d, "
    3292             :       "size of objects %" PRIuS "\n",
    3293             :       num_objects, objects_size_);
    3294             :   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
    3295             : }
    3296             : 
    3297             : 
    3298             : void Page::Print() {
    3299             :   // Make a best effort to print the objects in the page.
    3300             :   PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
    3301             :          AllocationSpaceName(this->owner()->identity()));
    3302             :   printf(" --------------------------------------\n");
    3303             :   HeapObjectIterator objects(this);
    3304             :   unsigned mark_size = 0;
    3305             :   for (HeapObject* object = objects.Next(); object != NULL;
    3306             :        object = objects.Next()) {
    3307             :     bool is_marked =
    3308             :         ObjectMarking::IsBlackOrGrey(object, MarkingState::Internal(object));
    3309             :     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    3310             :     if (is_marked) {
    3311             :       mark_size += object->Size();
    3312             :     }
    3313             :     object->ShortPrint();
    3314             :     PrintF("\n");
    3315             :   }
    3316             :   printf(" --------------------------------------\n");
    3317             :   printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
    3318             :          MarkingState::Internal(this).live_bytes());
    3319             : }
    3320             : 
    3321             : #endif  // DEBUG
    3322             : }  // namespace internal
    3323             : }  // namespace v8

Generated by: LCOV version 1.10