LCOV - code coverage report
Current view: top level - src/heap - spaces.cc
Test: app.info
Date: 2019-02-19

                  Hit    Total    Coverage
Lines:           1235     1401      88.2 %
Functions:        211      250      84.4 %

          Line data    Source code
       1             : // Copyright 2011 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/spaces.h"
       6             : 
       7             : #include <utility>
       8             : 
       9             : #include "src/base/bits.h"
      10             : #include "src/base/macros.h"
      11             : #include "src/base/platform/semaphore.h"
      12             : #include "src/base/template-utils.h"
      13             : #include "src/counters.h"
      14             : #include "src/heap/array-buffer-tracker.h"
      15             : #include "src/heap/concurrent-marking.h"
      16             : #include "src/heap/gc-tracer.h"
      17             : #include "src/heap/heap-controller.h"
      18             : #include "src/heap/incremental-marking-inl.h"
      19             : #include "src/heap/mark-compact.h"
      20             : #include "src/heap/remembered-set.h"
      21             : #include "src/heap/slot-set.h"
      22             : #include "src/heap/sweeper.h"
      23             : #include "src/msan.h"
      24             : #include "src/objects-inl.h"
      25             : #include "src/objects/free-space-inl.h"
      26             : #include "src/objects/js-array-buffer-inl.h"
      27             : #include "src/objects/js-array-inl.h"
      28             : #include "src/ostreams.h"
      29             : #include "src/snapshot/snapshot.h"
      30             : #include "src/v8.h"
      31             : #include "src/vm-state-inl.h"
      32             : 
      33             : namespace v8 {
      34             : namespace internal {
      35             : 
       36             : // These checks ensure that the lower 32 bits of any real heap object can't
       37             : // overlap with the lower 32 bits of the cleared weak reference value, so it
       38             : // is enough to compare only the lower 32 bits of a MaybeObject to decide
       39             : // whether it is a cleared weak reference.
      40             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
      41             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
      42             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
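// Editor's sketch (not part of the instrumented file): the property the
// asserts above establish is that no live object's address shares its lower
// 32 bits with the cleared weak reference value, so a single 32-bit compare
// suffices. The helper below is hypothetical; the constant is the real one.
inline bool IsClearedWeakLower32(uint32_t maybe_object_lower32) {
  // The cleared value falls inside the page header area, which can never be
  // the start of a live object, so this compare is unambiguous.
  return maybe_object_lower32 == kClearedWeakHeapObjectLower32;
}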
      43             : 
      44             : // ----------------------------------------------------------------------------
      45             : // HeapObjectIterator
      46             : 
      47           6 : HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
      48             :     : cur_addr_(kNullAddress),
      49             :       cur_end_(kNullAddress),
      50             :       space_(space),
      51             :       page_range_(space->first_page(), nullptr),
      52       30352 :       current_page_(page_range_.begin()) {}
      53             : 
      54         443 : HeapObjectIterator::HeapObjectIterator(Page* page)
      55             :     : cur_addr_(kNullAddress),
      56             :       cur_end_(kNullAddress),
      57             :       space_(reinterpret_cast<PagedSpace*>(page->owner())),
      58             :       page_range_(page),
      59         886 :       current_page_(page_range_.begin()) {
      60             : #ifdef DEBUG
      61             :   Space* owner = page->owner();
      62             :   DCHECK(owner == page->heap()->old_space() ||
      63             :          owner == page->heap()->map_space() ||
      64             :          owner == page->heap()->code_space() ||
      65             :          owner == page->heap()->read_only_space());
      66             : #endif  // DEBUG
      67         443 : }
      68             : 
       69             : // We have hit the end of the current page and should advance to the next
       70             : // page of objects.
      71       81934 : bool HeapObjectIterator::AdvanceToNextPage() {
      72             :   DCHECK_EQ(cur_addr_, cur_end_);
      73      163868 :   if (current_page_ == page_range_.end()) return false;
      74             :   Page* cur_page = *(current_page_++);
      75       51147 :   Heap* heap = space_->heap();
      76             : 
      77       51147 :   heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
      78             : #ifdef ENABLE_MINOR_MC
      79      153441 :   if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
      80             :     heap->minor_mark_compact_collector()->MakeIterable(
      81             :         cur_page, MarkingTreatmentMode::CLEAR,
      82           0 :         FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
      83             : #else
      84             :   DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
      85             : #endif  // ENABLE_MINOR_MC
      86       51147 :   cur_addr_ = cur_page->area_start();
      87       51147 :   cur_end_ = cur_page->area_end();
      88             :   DCHECK(cur_page->SweepingDone());
      89       51147 :   return true;
      90             : }
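// A usage sketch (hypothetical caller, not from this file): Next() walks
// objects inside the current [cur_addr_, cur_end_) block and falls back to
// AdvanceToNextPage() when the block is exhausted, so callers only loop.
void VisitLiveObjects(PagedSpace* space) {
  HeapObjectIterator it(space);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    // ... visit obj ...
  }
}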
      91             : 
      92       99052 : PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
      93       99052 :     : heap_(heap) {
      94             :   DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
      95             : 
      96      990520 :   for (SpaceIterator it(heap_); it.has_next();) {
      97      792416 :     it.next()->PauseAllocationObservers();
      98       99052 :   }
      99       99052 : }
     100             : 
     101       99052 : PauseAllocationObserversScope::~PauseAllocationObserversScope() {
     102      990520 :   for (SpaceIterator it(heap_); it.has_next();) {
     103      792416 :     it.next()->ResumeAllocationObservers();
     104       99052 :   }
     105       99052 : }
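// Usage sketch (the scope class is real, this driver is hypothetical):
// observers on every space are paused for the scope's lifetime and resumed
// when it is destroyed.
void WorkWithoutObservers(Heap* heap) {
  PauseAllocationObserversScope pause(heap);  // requires gc_state == NOT_IN_GC
  // ... allocations here do not trigger allocation-observer steps ...
}  // destructor resumes allocation observers on all spaces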
     106             : 
     107             : static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
     108             :     LAZY_INSTANCE_INITIALIZER;
     109             : 
     110       62069 : Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
     111       62069 :   base::MutexGuard guard(&mutex_);
     112             :   auto it = recently_freed_.find(code_range_size);
     113       64350 :   if (it == recently_freed_.end() || it->second.empty()) {
     114       59940 :     return reinterpret_cast<Address>(GetRandomMmapAddr());
     115             :   }
     116        2130 :   Address result = it->second.back();
     117             :   it->second.pop_back();
     118        2130 :   return result;
     119             : }
     120             : 
     121       62052 : void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
     122             :                                                 size_t code_range_size) {
     123       62052 :   base::MutexGuard guard(&mutex_);
     124       62052 :   recently_freed_[code_range_size].push_back(code_range_start);
     125       62052 : }
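// Editor's sketch of the recycling contract implemented above (this driver
// is hypothetical): a freed code-range start address is remembered per size
// class and handed back to the next request of the same size.
void CodeRangeHintRoundTrip(CodeRangeAddressHint* hint, size_t size) {
  Address a = hint->GetAddressHint(size);  // random mmap address at first
  hint->NotifyFreedCodeRange(a, size);     // record the freed range
  Address b = hint->GetAddressHint(size);  // pops the recorded address back
  DCHECK_EQ(a, b);
}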
     126             : 
     127             : // -----------------------------------------------------------------------------
     128             : // MemoryAllocator
     129             : //
     130             : 
     131       62063 : MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
     132             :                                  size_t code_range_size)
     133             :     : isolate_(isolate),
     134       62063 :       data_page_allocator_(isolate->page_allocator()),
     135             :       code_page_allocator_(nullptr),
     136             :       capacity_(RoundUp(capacity, Page::kPageSize)),
     137             :       size_(0),
     138             :       size_executable_(0),
     139             :       lowest_ever_allocated_(static_cast<Address>(-1ll)),
     140             :       highest_ever_allocated_(kNullAddress),
     141      248253 :       unmapper_(isolate->heap(), this) {
     142       62064 :   InitializeCodePageAllocator(data_page_allocator_, code_range_size);
     143       62063 : }
     144             : 
     145       62063 : void MemoryAllocator::InitializeCodePageAllocator(
     146             :     v8::PageAllocator* page_allocator, size_t requested) {
     147             :   DCHECK_NULL(code_page_allocator_instance_.get());
     148             : 
     149       62063 :   code_page_allocator_ = page_allocator;
     150             : 
     151       62063 :   if (requested == 0) {
     152       62063 :     if (!kRequiresCodeRange) return;
     153             :     // When a target requires the code range feature, we put all code objects
     154             :     // in a kMaximalCodeRangeSize range of virtual address space, so that
     155             :     // they can call each other with near calls.
     156             :     requested = kMaximalCodeRangeSize;
     157           1 :   } else if (requested <= kMinimumCodeRangeSize) {
     158             :     requested = kMinimumCodeRangeSize;
     159             :   }
     160             : 
     161             :   const size_t reserved_area =
     162             :       kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
     163             :   if (requested < (kMaximalCodeRangeSize - reserved_area)) {
     164             :     requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
      165             :     // Fulfilling both the reserved pages requirement and huge code area
      166             :     // alignment is not supported (it would require re-implementation).
     167             :     DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
     168             :   }
     169             :   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
     170             : 
     171             :   Address hint =
     172             :       RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
     173      124127 :                 page_allocator->AllocatePageSize());
     174             :   VirtualMemory reservation(
     175             :       page_allocator, requested, reinterpret_cast<void*>(hint),
     176      124128 :       Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
     177       62064 :   if (!reservation.IsReserved()) {
     178             :     V8::FatalProcessOutOfMemory(isolate_,
     179       62064 :                                 "CodeRange setup: allocate virtual memory");
     180             :   }
     181       62064 :   code_range_ = reservation.region();
     182             : 
     183             :   // We are sure that we have mapped a block of requested addresses.
     184             :   DCHECK_GE(reservation.size(), requested);
     185             :   Address base = reservation.address();
     186             : 
     187             :   // On some platforms, specifically Win64, we need to reserve some pages at
     188             :   // the beginning of an executable space. See
     189             :   //   https://cs.chromium.org/chromium/src/components/crash/content/
     190             :   //     app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
     191             :   // for details.
     192             :   if (reserved_area > 0) {
     193             :     if (!reservation.SetPermissions(base, reserved_area,
     194             :                                     PageAllocator::kReadWrite))
     195             :       V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
     196             : 
     197             :     base += reserved_area;
     198             :   }
     199       62064 :   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
     200             :   size_t size =
     201       62064 :       RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
     202       62064 :                 MemoryChunk::kPageSize);
     203             :   DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
     204             : 
     205      124128 :   LOG(isolate_,
     206             :       NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
     207             :                requested));
     208             : 
     209       62064 :   heap_reservation_.TakeControl(&reservation);
     210      124127 :   code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
     211             :       page_allocator, aligned_base, size,
     212             :       static_cast<size_t>(MemoryChunk::kAlignment));
     213       62063 :   code_page_allocator_ = code_page_allocator_instance_.get();
     214             : }
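// Editor's note in code form: after setup, code_page_allocator_ points at a
// BoundedPageAllocator confined to code_range_, which is what keeps code
// objects within near-call distance of each other. The checker below is
// hypothetical and assumes a code_range() accessor for the member set above.
void CheckExecutablePageInCodeRange(MemoryAllocator* allocator,
                                    Address executable_page) {
  DCHECK(allocator->code_range().contains(executable_page));
}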
     215             : 
     216       62049 : void MemoryAllocator::TearDown() {
     217       62049 :   unmapper()->TearDown();
     218             : 
     219             :   // Check that spaces were torn down before MemoryAllocator.
     220             :   DCHECK_EQ(size_, 0u);
     221             :   // TODO(gc) this will be true again when we fix FreeMemory.
     222             :   // DCHECK_EQ(0, size_executable_);
     223       62049 :   capacity_ = 0;
     224             : 
     225       62049 :   if (last_chunk_.IsReserved()) {
     226           0 :     last_chunk_.Free();
     227             :   }
     228             : 
     229       62049 :   if (code_page_allocator_instance_.get()) {
     230             :     DCHECK(!code_range_.is_empty());
     231             :     code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
     232      124098 :                                                             code_range_.size());
     233       62049 :     code_range_ = base::AddressRegion();
     234             :     code_page_allocator_instance_.reset();
     235             :   }
     236       62049 :   code_page_allocator_ = nullptr;
     237       62049 :   data_page_allocator_ = nullptr;
     238       62049 : }
     239             : 
     240      340798 : class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
     241             :  public:
     242             :   explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
     243             :       : CancelableTask(isolate),
     244             :         unmapper_(unmapper),
     245      340978 :         tracer_(isolate->heap()->tracer()) {}
     246             : 
     247             :  private:
     248      162269 :   void RunInternal() override {
     249      649199 :     TRACE_BACKGROUND_GC(tracer_,
     250             :                         GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
     251      162286 :     unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     252      162338 :     unmapper_->active_unmapping_tasks_--;
     253      162338 :     unmapper_->pending_unmapping_tasks_semaphore_.Signal();
     254      162355 :     if (FLAG_trace_unmapper) {
     255           0 :       PrintIsolate(unmapper_->heap_->isolate(),
     256           0 :                    "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
     257      162337 :     }
     258      162364 :   }
     259             : 
     260             :   Unmapper* const unmapper_;
     261             :   GCTracer* const tracer_;
     262             :   DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
     263             : };
     264             : 
     265      247184 : void MemoryAllocator::Unmapper::FreeQueuedChunks() {
     266      247184 :   if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
     267      170504 :     if (!MakeRoomForNewTasks()) {
     268             :       // kMaxUnmapperTasks are already running. Avoid creating any more.
     269          15 :       if (FLAG_trace_unmapper) {
     270           0 :         PrintIsolate(heap_->isolate(),
     271             :                      "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
     272           0 :                      kMaxUnmapperTasks);
     273             :       }
     274      247199 :       return;
     275             :     }
     276      340978 :     auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
     277      170489 :     if (FLAG_trace_unmapper) {
     278           0 :       PrintIsolate(heap_->isolate(),
     279             :                    "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
     280           0 :                    task->id());
     281             :     }
     282             :     DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
     283             :     DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
     284             :     DCHECK_GE(active_unmapping_tasks_, 0);
     285             :     active_unmapping_tasks_++;
     286      340978 :     task_ids_[pending_unmapping_tasks_++] = task->id();
     287      511467 :     V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
     288             :   } else {
     289       76680 :     PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     290             :   }
     291             : }
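// Condensed decision sketch (editor's illustration; the enum and driver are
// hypothetical stand-ins for the control flow above):
enum class UnmapAction { kFreeSynchronously, kPostBackgroundTask, kLeaveQueued };
UnmapAction DecideUnmapAction(bool tearing_down, bool concurrent_sweeping,
                              bool at_task_limit) {
  if (tearing_down || !concurrent_sweeping)
    return UnmapAction::kFreeSynchronously;  // free on this thread
  if (at_task_limit)
    return UnmapAction::kLeaveQueued;        // chunks wait for a later call
  return UnmapAction::kPostBackgroundTask;   // spawn one more unmapper task
}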
     292             : 
     293      204231 : void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
     294      374720 :   for (int i = 0; i < pending_unmapping_tasks_; i++) {
     295      340978 :     if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
     296             :         TryAbortResult::kTaskAborted) {
     297      162368 :       pending_unmapping_tasks_semaphore_.Wait();
     298             :     }
     299             :   }
     300      204231 :   pending_unmapping_tasks_ = 0;
     301             :   active_unmapping_tasks_ = 0;
     302             : 
     303      204231 :   if (FLAG_trace_unmapper) {
     304             :     PrintIsolate(
     305           0 :         heap_->isolate(),
     306           0 :         "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
     307             :   }
     308      204231 : }
     309             : 
     310       74510 : void MemoryAllocator::Unmapper::PrepareForMarkCompact() {
     311       74510 :   CancelAndWaitForPendingTasks();
     312             :   // Free non-regular chunks because they cannot be re-used.
     313       74510 :   PerformFreeMemoryOnQueuedNonRegularChunks();
     314       74510 : }
     315             : 
     316       69931 : void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
     317       69931 :   CancelAndWaitForPendingTasks();
     318       69932 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     319       69932 : }
     320             : 
     321      170504 : bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
     322             :   DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
     323             : 
     324      170504 :   if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
     325             :     // All previous unmapping tasks have been run to completion.
     326             :     // Finalize those tasks to make room for new ones.
     327       59789 :     CancelAndWaitForPendingTasks();
     328             :   }
     329      170504 :   return pending_unmapping_tasks_ != kMaxUnmapperTasks;
     330             : }
     331             : 
     332      445513 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
     333             :   MemoryChunk* chunk = nullptr;
     334      898134 :   while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
     335        7108 :     allocator_->PerformFreeMemory(chunk);
     336             :   }
     337      445531 : }
     338             : 
     339             : template <MemoryAllocator::Unmapper::FreeMode mode>
     340      370841 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     341             :   MemoryChunk* chunk = nullptr;
     342      370841 :   if (FLAG_trace_unmapper) {
     343           0 :     PrintIsolate(
     344           0 :         heap_->isolate(),
     345             :         "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
     346           0 :         NumberOfChunks());
     347             :   }
     348             :   // Regular chunks.
     349      591285 :   while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
     350             :     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
     351      220414 :     allocator_->PerformFreeMemory(chunk);
     352      220413 :     if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
     353             :   }
     354             :   if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
     355             :     // The previous loop uncommitted any pages marked as pooled and added them
     356             :     // to the pooled list. In case of kReleasePooled we need to free them
     357             :     // though.
     358      319864 :     while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
     359      187882 :       allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
     360             :     }
     361             :   }
     362      371025 :   PerformFreeMemoryOnQueuedNonRegularChunks();
     363      371023 : }
     364             : 
     365       62051 : void MemoryAllocator::Unmapper::TearDown() {
     366       62051 :   CHECK_EQ(0, pending_unmapping_tasks_);
     367       62051 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     368             :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     369             :     DCHECK(chunks_[i].empty());
     370             :   }
     371       62051 : }
     372             : 
     373           0 : size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
     374           0 :   base::MutexGuard guard(&mutex_);
     375           0 :   return chunks_[kRegular].size() + chunks_[kNonRegular].size();
     376             : }
     377             : 
     378           5 : int MemoryAllocator::Unmapper::NumberOfChunks() {
     379           5 :   base::MutexGuard guard(&mutex_);
     380             :   size_t result = 0;
     381          20 :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     382          30 :     result += chunks_[i].size();
     383             :   }
     384          10 :   return static_cast<int>(result);
     385             : }
     386             : 
     387           0 : size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
     388           0 :   base::MutexGuard guard(&mutex_);
     389             : 
     390             :   size_t sum = 0;
      391             :   // kPooled chunks are already uncommitted. We only have to account for
     392             :   // kRegular and kNonRegular chunks.
     393           0 :   for (auto& chunk : chunks_[kRegular]) {
     394           0 :     sum += chunk->size();
     395             :   }
     396           0 :   for (auto& chunk : chunks_[kNonRegular]) {
     397           0 :     sum += chunk->size();
     398             :   }
     399           0 :   return sum;
     400             : }
     401             : 
     402       27870 : bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
     403             :   Address base = reservation->address();
     404             :   size_t size = reservation->size();
     405       27870 :   if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
     406             :     return false;
     407             :   }
     408       27870 :   UpdateAllocatedSpaceLimits(base, base + size);
     409       55740 :   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
     410       27870 :   return true;
     411             : }
     412             : 
     413      206879 : bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
     414             :   size_t size = reservation->size();
     415      206879 :   if (!reservation->SetPermissions(reservation->address(), size,
     416      206879 :                                    PageAllocator::kNoAccess)) {
     417             :     return false;
     418             :   }
     419      413758 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
     420      206878 :   return true;
     421             : }
     422             : 
     423      248916 : void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
     424             :                                  Address base, size_t size) {
     425      248916 :   CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
     426      248916 : }
     427             : 
     428      679008 : Address MemoryAllocator::AllocateAlignedMemory(
     429             :     size_t reserve_size, size_t commit_size, size_t alignment,
     430      679008 :     Executability executable, void* hint, VirtualMemory* controller) {
     431             :   v8::PageAllocator* page_allocator = this->page_allocator(executable);
     432             :   DCHECK(commit_size <= reserve_size);
     433      679008 :   VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
     434      679009 :   if (!reservation.IsReserved()) return kNullAddress;
     435             :   Address base = reservation.address();
     436             :   size_ += reservation.size();
     437             : 
     438      679009 :   if (executable == EXECUTABLE) {
     439      133757 :     if (!CommitExecutableMemory(&reservation, base, commit_size,
     440      133757 :                                 reserve_size)) {
     441             :       base = kNullAddress;
     442             :     }
     443             :   } else {
     444      545252 :     if (reservation.SetPermissions(base, commit_size,
     445             :                                    PageAllocator::kReadWrite)) {
     446      545252 :       UpdateAllocatedSpaceLimits(base, base + commit_size);
     447             :     } else {
     448             :       base = kNullAddress;
     449             :     }
     450             :   }
     451             : 
     452      679009 :   if (base == kNullAddress) {
     453             :     // Failed to commit the body. Free the mapping and any partially committed
     454             :     // regions inside it.
     455           0 :     reservation.Free();
     456             :     size_ -= reserve_size;
     457           0 :     return kNullAddress;
     458             :   }
     459             : 
     460      679009 :   controller->TakeControl(&reservation);
     461      679009 :   return base;
     462             : }
     463             : 
     464     7342619 : void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
     465             :   base::AddressRegion memory_area =
     466     7342619 :       MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
     467     7349900 :   if (memory_area.size() != 0) {
     468      109402 :     MemoryAllocator* memory_allocator = heap_->memory_allocator();
     469             :     v8::PageAllocator* page_allocator =
     470             :         memory_allocator->page_allocator(executable());
     471       54701 :     CHECK(page_allocator->DiscardSystemPages(
     472             :         reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
     473             :   }
     474     7349899 : }
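// Editor's note (a sketch of the contract; exact rounding lives in
// ComputeDiscardMemoryArea): the computed area keeps enough of the block for
// the free-space filler and trims the rest to whole commit pages, so a block
// smaller than one OS page yields an empty region and no DiscardSystemPages
// call. Only fully unused OS pages are ever returned to the operating system.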
     475             : 
     476     8499220 : size_t MemoryChunkLayout::CodePageGuardStartOffset() {
     477             :   // We are guarding code pages: the first OS page after the header
     478             :   // will be protected as non-writable.
     479     8499214 :   return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
     480             : }
     481             : 
     482         500 : size_t MemoryChunkLayout::CodePageGuardSize() {
     483     8499719 :   return MemoryAllocator::GetCommitPageSize();
     484             : }
     485             : 
     486     8231706 : intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
     487             :   // We are guarding code pages: the first OS page after the header
     488             :   // will be protected as non-writable.
     489    16463411 :   return CodePageGuardStartOffset() + CodePageGuardSize();
     490             : }
     491             : 
     492           0 : intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
     493             :   // We are guarding code pages: the last OS page will be protected as
     494             :   // non-writable.
     495      562208 :   return Page::kPageSize -
     496      562208 :          static_cast<int>(MemoryAllocator::GetCommitPageSize());
     497             : }
     498             : 
     499      562208 : size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
     500      562208 :   size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
     501             :   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
     502      562208 :   return memory;
     503             : }
     504             : 
     505           5 : intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
     506           5 :   return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
     507             : }
     508             : 
     509        1000 : size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
     510             :     AllocationSpace space) {
     511       28870 :   if (space == CODE_SPACE) {
     512         500 :     return ObjectStartOffsetInCodePage();
     513             :   }
     514             :   return ObjectStartOffsetInDataPage();
     515             : }
     516             : 
     517      775625 : size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
     518             :   size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
     519             :   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
     520      775625 :   return memory;
     521             : }
     522             : 
     523     1314373 : size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
     524             :     AllocationSpace space) {
     525     1788788 :   if (space == CODE_SPACE) {
     526      562208 :     return AllocatableMemoryInCodePage();
     527             :   }
     528             :   return AllocatableMemoryInDataPage();
     529             : }
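// Worked example (editor's sketch; the concrete numbers assume a 4 KiB
// commit page, a header smaller than 4 KiB, and a 512 KiB page size —
// actual constants vary by build configuration):
//   CodePageGuardStartOffset    = RoundUp(kHeaderSize, 4 KiB)  =   4 KiB
//   ObjectStartOffsetInCodePage = 4 KiB + CodePageGuardSize()  =   8 KiB
//   ObjectEndOffsetInCodePage   = 512 KiB - 4 KiB              = 508 KiB
//   AllocatableMemoryInCodePage = 508 KiB - 8 KiB              = 500 KiB
// Data pages have no guard pages: objects start at
// RoundUp(kHeaderSize, kTaggedSize) and run to the end of the page.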
     530             : 
     531           0 : Heap* MemoryChunk::synchronized_heap() {
     532             :   return reinterpret_cast<Heap*>(
     533           0 :       base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
     534             : }
     535             : 
     536           0 : void MemoryChunk::InitializationMemoryFence() {
     537             :   base::SeqCst_MemoryFence();
     538             : #ifdef THREAD_SANITIZER
     539             :   // Since TSAN does not process memory fences, we use the following annotation
     540             :   // to tell TSAN that there is no data race when emitting a
     541             :   // InitializationMemoryFence. Note that the other thread still needs to
     542             :   // perform MemoryChunk::synchronized_heap().
     543             :   base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
     544             :                       reinterpret_cast<base::AtomicWord>(heap_));
     545             : #endif
     546           0 : }
     547             : 
     548     3738041 : void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     549             :     PageAllocator::Permission permission) {
     550             :   DCHECK(permission == PageAllocator::kRead ||
     551             :          permission == PageAllocator::kReadExecute);
     552             :   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
     553             :   DCHECK(owner()->identity() == CODE_SPACE ||
     554             :          owner()->identity() == CODE_LO_SPACE);
     555             :   // Decrementing the write_unprotect_counter_ and changing the page
     556             :   // protection mode has to be atomic.
     557     3738041 :   base::MutexGuard guard(page_protection_change_mutex_);
     558     3738044 :   if (write_unprotect_counter_ == 0) {
     559             :     // This is a corner case that may happen when we have a
     560             :     // CodeSpaceMemoryModificationScope open and this page was newly
     561             :     // added.
     562     3738041 :     return;
     563             :   }
     564     3738044 :   write_unprotect_counter_--;
     565             :   DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
     566     3738044 :   if (write_unprotect_counter_ == 0) {
     567             :     Address protect_start =
     568     3664604 :         address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     569             :     size_t page_size = MemoryAllocator::GetCommitPageSize();
     570             :     DCHECK(IsAligned(protect_start, page_size));
     571             :     size_t protect_size = RoundUp(area_size(), page_size);
     572     3664604 :     CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
     573             :   }
     574             : }
     575             : 
     576       71547 : void MemoryChunk::SetReadable() {
     577       86750 :   DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
     578       71547 : }
     579             : 
     580     2942710 : void MemoryChunk::SetReadAndExecutable() {
     581             :   DCHECK(!FLAG_jitless);
     582             :   DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     583     3651292 :       PageAllocator::kReadExecute);
     584     2942709 : }
     585             : 
     586     3676564 : void MemoryChunk::SetReadAndWritable() {
     587             :   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
     588             :   DCHECK(owner()->identity() == CODE_SPACE ||
     589             :          owner()->identity() == CODE_LO_SPACE);
     590             :   // Incrementing the write_unprotect_counter_ and changing the page
     591             :   // protection mode has to be atomic.
     592     3676564 :   base::MutexGuard guard(page_protection_change_mutex_);
     593     3676569 :   write_unprotect_counter_++;
     594             :   DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
     595     3676569 :   if (write_unprotect_counter_ == 1) {
     596             :     Address unprotect_start =
     597     3603124 :         address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     598             :     size_t page_size = MemoryAllocator::GetCommitPageSize();
     599             :     DCHECK(IsAligned(unprotect_start, page_size));
     600             :     size_t unprotect_size = RoundUp(area_size(), page_size);
     601     3603125 :     CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
     602             :                                       PageAllocator::kReadWrite));
     603             :   }
     604     3676570 : }
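// Editor's sketch of the nesting contract between SetReadAndWritable and the
// decrement helper above (this driver is hypothetical; in V8,
// CodeSpaceMemoryModificationScope plays this role):
void PatchCode(MemoryChunk* chunk) {
  chunk->SetReadAndWritable();    // counter 0 -> 1: flips page to RW
  chunk->SetReadAndWritable();    // counter 1 -> 2: no permission change
  // ... write to the code page ...
  chunk->SetReadAndExecutable();  // counter 2 -> 1: stays RW
  chunk->SetReadAndExecutable();  // counter 1 -> 0: flips page back to RX
}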
     605             : 
     606             : namespace {
     607             : 
     608             : PageAllocator::Permission DefaultWritableCodePermissions() {
     609             :   return FLAG_jitless ? PageAllocator::kReadWrite
     610           0 :                       : PageAllocator::kReadWriteExecute;
     611             : }
     612             : 
     613             : }  // namespace
     614             : 
     615      974391 : MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
     616             :                                      Address area_start, Address area_end,
     617      706879 :                                      Executability executable, Space* owner,
     618             :                                      VirtualMemory reservation) {
     619             :   MemoryChunk* chunk = FromAddress(base);
     620             : 
     621             :   DCHECK_EQ(base, chunk->address());
     622             : 
     623      706877 :   chunk->heap_ = heap;
     624      706877 :   chunk->size_ = size;
     625      706877 :   chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
     626             :   DCHECK(HasHeaderSentinel(area_start));
     627      706877 :   chunk->area_start_ = area_start;
     628      706877 :   chunk->area_end_ = area_end;
     629      706877 :   chunk->flags_ = Flags(NO_FLAGS);
     630             :   chunk->set_owner(owner);
     631             :   chunk->InitializeReservedMemory();
     632      706879 :   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
     633      706879 :   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
     634             :   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
     635      706879 :                                        nullptr);
     636             :   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
     637      706879 :                                        nullptr);
     638      706879 :   chunk->invalidated_slots_ = nullptr;
     639      706879 :   chunk->skip_list_ = nullptr;
     640             :   chunk->progress_bar_ = 0;
     641      706879 :   chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
     642             :   chunk->set_concurrent_sweeping_state(kSweepingDone);
     643      706879 :   chunk->page_protection_change_mutex_ = new base::Mutex();
     644      706879 :   chunk->write_unprotect_counter_ = 0;
     645      706879 :   chunk->mutex_ = new base::Mutex();
     646      706879 :   chunk->allocated_bytes_ = chunk->area_size();
     647      706879 :   chunk->wasted_memory_ = 0;
     648      706879 :   chunk->young_generation_bitmap_ = nullptr;
     649      706879 :   chunk->marking_bitmap_ = nullptr;
     650      706879 :   chunk->local_tracker_ = nullptr;
     651             : 
     652             :   chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
     653             :       0;
     654             :   chunk->external_backing_store_bytes_
     655             :       [ExternalBackingStoreType::kExternalString] = 0;
     656             : 
     657     4948153 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     658     4241274 :     chunk->categories_[i] = nullptr;
     659             :   }
     660             : 
     661             :   chunk->AllocateMarkingBitmap();
     662      706879 :   if (owner->identity() == RO_SPACE) {
     663             :     heap->incremental_marking()
     664             :         ->non_atomic_marking_state()
     665             :         ->bitmap(chunk)
     666       61049 :         ->MarkAllBits();
     667             :   } else {
     668             :     heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
     669             :                                                                           0);
     670             :   }
     671             : 
     672             :   DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
     673             :   DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
     674             :   DCHECK_EQ(kOwnerOffset, OFFSET_OF(MemoryChunk, owner_));
     675             : 
     676      706879 :   if (executable == EXECUTABLE) {
     677             :     chunk->SetFlag(IS_EXECUTABLE);
     678      133757 :     if (heap->write_protect_code_memory()) {
     679             :       chunk->write_unprotect_counter_ =
     680      133757 :           heap->code_space_memory_modification_scope_depth();
     681             :     } else {
     682             :       size_t page_size = MemoryAllocator::GetCommitPageSize();
     683             :       DCHECK(IsAligned(area_start, page_size));
     684           0 :       size_t area_size = RoundUp(area_end - area_start, page_size);
     685           0 :       CHECK(reservation.SetPermissions(area_start, area_size,
     686             :                                        DefaultWritableCodePermissions()));
     687             :     }
     688             :   }
     689             : 
     690             :   chunk->reservation_ = std::move(reservation);
     691             : 
     692      706879 :   return chunk;
     693             : }
     694             : 
     695      429099 : Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
     696             :   Page* page = static_cast<Page*>(chunk);
     697             :   DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
     698             :                 page->owner()->identity()),
     699             :             page->area_size());
     700             :   // Make sure that categories are initialized before freeing the area.
     701             :   page->ResetAllocatedBytes();
     702      429099 :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
     703      429099 :   page->AllocateFreeListCategories();
     704             :   page->InitializeFreeListCategories();
     705             :   page->list_node().Initialize();
     706             :   page->InitializationMemoryFence();
     707      429099 :   return page;
     708             : }
     709             : 
     710      216320 : Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
     711             :   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
     712             :   bool in_to_space = (id() != kFromSpace);
     713      216320 :   chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
     714             :   Page* page = static_cast<Page*>(chunk);
     715      216320 :   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
     716      216320 :   page->AllocateLocalTracker();
     717             :   page->list_node().Initialize();
     718             : #ifdef ENABLE_MINOR_MC
     719      216320 :   if (FLAG_minor_mc) {
     720             :     page->AllocateYoungGenerationBitmap();
     721             :     heap()
     722             :         ->minor_mark_compact_collector()
     723             :         ->non_atomic_marking_state()
     724           0 :         ->ClearLiveness(page);
     725             :   }
     726             : #endif  // ENABLE_MINOR_MC
     727             :   page->InitializationMemoryFence();
     728      216320 :   return page;
     729             : }
     730             : 
     731      282154 : LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
     732             :                                  Executability executable) {
     733      105015 :   if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
     734             :     STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
     735           0 :     FATAL("Code page is too large.");
     736             :   }
     737             : 
     738             :   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
     739             : 
     740             :   // Initialize the sentinel value for each page boundary since the mutator
     741             :   // may initialize the object starting from its end.
     742             :   Address sentinel = chunk->address() + MemoryChunk::kHeaderSentinelOffset +
     743       61048 :                      MemoryChunk::kPageSize;
     744      238187 :   while (sentinel < chunk->area_end()) {
     745      116091 :     *reinterpret_cast<intptr_t*>(sentinel) = kNullAddress;
     746      116091 :     sentinel += MemoryChunk::kPageSize;
     747             :   }
     748             : 
     749             :   LargePage* page = static_cast<LargePage*>(chunk);
     750             :   page->SetFlag(MemoryChunk::LARGE_PAGE);
     751             :   page->list_node().Initialize();
     752       61048 :   return page;
     753             : }
     754             : 
     755      429100 : void Page::AllocateFreeListCategories() {
     756     3003693 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     757             :     categories_[i] = new FreeListCategory(
     758     5149187 :         reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
     759             :   }
     760      429099 : }
     761             : 
     762         103 : void Page::InitializeFreeListCategories() {
     763     2575315 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     764     2575212 :     categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
     765             :   }
     766         103 : }
     767             : 
     768      645735 : void Page::ReleaseFreeListCategories() {
     769     4520110 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     770     3874374 :     if (categories_[i] != nullptr) {
     771     2574160 :       delete categories_[i];
     772     2574161 :       categories_[i] = nullptr;
     773             :     }
     774             :   }
     775      645736 : }
     776             : 
     777         588 : Page* Page::ConvertNewToOld(Page* old_page) {
     778             :   DCHECK(old_page);
     779             :   DCHECK(old_page->InNewSpace());
     780         588 :   OldSpace* old_space = old_page->heap()->old_space();
     781             :   old_page->set_owner(old_space);
     782             :   old_page->SetFlags(0, static_cast<uintptr_t>(~0));
     783         588 :   Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
     784         588 :   old_space->AddPage(new_page);
     785         588 :   return new_page;
     786             : }
     787             : 
     788       22115 : size_t MemoryChunk::CommittedPhysicalMemory() {
     789       24528 :   if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
     790        9851 :     return size();
     791        2413 :   return high_water_mark_;
     792             : }
     793             : 
     794       11590 : bool MemoryChunk::InOldSpace() const {
     795       11590 :   return owner()->identity() == OLD_SPACE;
     796             : }
     797             : 
     798           0 : bool MemoryChunk::InLargeObjectSpace() const {
     799           0 :   return owner()->identity() == LO_SPACE;
     800             : }
     801             : 
     802      679008 : MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     803             :                                             size_t commit_area_size,
     804             :                                             Executability executable,
     805             :                                             Space* owner) {
     806             :   DCHECK_LE(commit_area_size, reserve_area_size);
     807             : 
     808             :   size_t chunk_size;
     809     1358016 :   Heap* heap = isolate_->heap();
     810             :   Address base = kNullAddress;
     811      679008 :   VirtualMemory reservation;
     812             :   Address area_start = kNullAddress;
     813             :   Address area_end = kNullAddress;
     814             :   void* address_hint =
     815             :       AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
     816             : 
     817             :   //
     818             :   // MemoryChunk layout:
     819             :   //
     820             :   //             Executable
     821             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     822             :   // |           Header           |
     823             :   // +----------------------------+<- base + CodePageGuardStartOffset
     824             :   // |           Guard            |
     825             :   // +----------------------------+<- area_start_
     826             :   // |           Area             |
     827             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     828             :   // |   Committed but not used   |
     829             :   // +----------------------------+<- aligned at OS page boundary
     830             :   // | Reserved but not committed |
     831             :   // +----------------------------+<- aligned at OS page boundary
     832             :   // |           Guard            |
     833             :   // +----------------------------+<- base + chunk_size
     834             :   //
     835             :   //           Non-executable
     836             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     837             :   // |          Header            |
      838             :   // +----------------------------+<- area_start_ (base + ObjectStartOffsetInDataPage)
     839             :   // |           Area             |
     840             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     841             :   // |  Committed but not used    |
     842             :   // +----------------------------+<- aligned at OS page boundary
     843             :   // | Reserved but not committed |
     844             :   // +----------------------------+<- base + chunk_size
     845             :   //
     846             : 
     847      679009 :   if (executable == EXECUTABLE) {
     848      133757 :     chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
     849             :                                reserve_area_size +
     850             :                                MemoryChunkLayout::CodePageGuardSize(),
     851      133757 :                            GetCommitPageSize());
     852             : 
     853             :     // Size of header (not executable) plus area (executable).
     854             :     size_t commit_size = ::RoundUp(
     855      133757 :         MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
     856      133757 :         GetCommitPageSize());
     857             :     base =
     858             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     859      133757 :                               executable, address_hint, &reservation);
     860      133757 :     if (base == kNullAddress) return nullptr;
     861             :     // Update executable memory size.
     862             :     size_executable_ += reservation.size();
     863             : 
     864             :     if (Heap::ShouldZapGarbage()) {
     865             :       ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
     866             :       ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
     867             :                commit_area_size, kZapValue);
     868             :     }
     869             : 
     870      133757 :     area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     871      133757 :     area_end = area_start + commit_area_size;
     872             :   } else {
     873             :     chunk_size = ::RoundUp(
     874             :         MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
     875      545252 :         GetCommitPageSize());
     876             :     size_t commit_size = ::RoundUp(
     877             :         MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
     878      545252 :         GetCommitPageSize());
     879             :     base =
     880             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     881      545252 :                               executable, address_hint, &reservation);
     882             : 
     883      545252 :     if (base == kNullAddress) return nullptr;
     884             : 
     885             :     if (Heap::ShouldZapGarbage()) {
     886             :       ZapBlock(
     887             :           base,
     888             :           MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
     889             :           kZapValue);
     890             :     }
     891             : 
     892      545252 :     area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
     893      545252 :     area_end = area_start + commit_area_size;
     894             :   }
     895             : 
     896             :   // Use chunk_size for statistics and callbacks because we assume that they
     897             :   // treat reserved but not-yet committed memory regions of chunks as allocated.
     898             :   isolate_->counters()->memory_allocated()->Increment(
     899     1358018 :       static_cast<int>(chunk_size));
     900             : 
     901     1358016 :   LOG(isolate_,
     902             :       NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
     903             : 
     904             :   // We cannot use the last chunk in the address space because we would
     905             :   // overflow when comparing top and limit if this chunk is used for a
     906             :   // linear allocation area.
     907      679008 :   if ((base + chunk_size) == 0u) {
     908           0 :     CHECK(!last_chunk_.IsReserved());
     909           0 :     last_chunk_.TakeControl(&reservation);
     910           0 :     UncommitMemory(&last_chunk_);
     911             :     size_ -= chunk_size;
     912           0 :     if (executable == EXECUTABLE) {
     913             :       size_executable_ -= chunk_size;
     914             :     }
     915           0 :     CHECK(last_chunk_.IsReserved());
     916             :     return AllocateChunk(reserve_area_size, commit_area_size, executable,
     917           0 :                          owner);
     918             :   }
     919             : 
     920             :   MemoryChunk* chunk =
     921             :       MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
     922      679007 :                               executable, owner, std::move(reservation));
     923             : 
     924      679009 :   if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
     925      679009 :   return chunk;
     926             : }
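// Worked sizing example (editor's sketch, assuming a 4 KiB commit page and
// reserve_area_size == commit_area_size == 40 KiB for an EXECUTABLE chunk):
//   chunk_size  = RoundUp(ObjectStartOffsetInCodePage() + 40 KiB +
//                         CodePageGuardSize(), 4 KiB)
//   commit_size = RoundUp(CodePageGuardStartOffset() + 40 KiB, 4 KiB)
// Only [base, base + commit_size) is made accessible up front; the trailing
// guard page and any "reserved but not committed" tail stay inaccessible,
// matching the executable diagram above.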
     927             : 
     928      345337 : void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
     929      835484 :   if (is_marking) {
     930             :     SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     931             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     932             :     SetFlag(MemoryChunk::INCREMENTAL_MARKING);
     933             :   } else {
     934             :     ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     935             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     936             :     ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
     937             :   }
     938      345337 : }
     939             : 
     940      147701 : void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
     941             :   SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     942      364021 :   if (is_marking) {
     943             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     944             :     SetFlag(MemoryChunk::INCREMENTAL_MARKING);
     945             :   } else {
     946             :     ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     947             :     ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
     948             :   }
     949      147701 : }
     950             : 
     951     1388989 : void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
     952             : 
     953      229842 : void Page::AllocateLocalTracker() {
     954             :   DCHECK_NULL(local_tracker_);
     955      459688 :   local_tracker_ = new LocalArrayBufferTracker(this);
     956      229846 : }
     957             : 
     958       15919 : bool Page::contains_array_buffers() {
     959       31349 :   return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
     960             : }
     961             : 
     962           0 : void Page::ResetFreeListStatistics() {
     963      493495 :   wasted_memory_ = 0;
     964           0 : }
     965             : 
     966           0 : size_t Page::AvailableInFreeList() {
     967           0 :   size_t sum = 0;
     968           0 :   ForAllFreeListCategories([&sum](FreeListCategory* category) {
     969           0 :     sum += category->available();
     970             :   });
     971           0 :   return sum;
     972             : }
     973             : 
     974             : #ifdef DEBUG
     975             : namespace {
      976             : // Skips fillers starting from the given filler until the end address.
     977             : // Returns the first address after the skipped fillers.
     978             : Address SkipFillers(HeapObject filler, Address end) {
     979             :   Address addr = filler->address();
     980             :   while (addr < end) {
     981             :     filler = HeapObject::FromAddress(addr);
     982             :     CHECK(filler->IsFiller());
     983             :     addr = filler->address() + filler->Size();
     984             :   }
     985             :   return addr;
     986             : }
     987             : }  // anonymous namespace
     988             : #endif  // DEBUG
     989             : 
     990      182844 : size_t Page::ShrinkToHighWaterMark() {
     991             :   // Shrinking only makes sense outside of the CodeRange, where we don't care
     992             :   // about address space fragmentation.
     993     1645416 :   VirtualMemory* reservation = reserved_memory();
     994      182844 :   if (!reservation->IsReserved()) return 0;
     995             : 
      996             :   // Shrink pages to the high water mark. The water mark points either to a
      997             :   // filler or to area_end.
     998      365688 :   HeapObject filler = HeapObject::FromAddress(HighWaterMark());
     999      182844 :   if (filler->address() == area_end()) return 0;
    1000      182839 :   CHECK(filler->IsFiller());
    1001             :   // Ensure that no objects were allocated in [filler, area_end) region.
    1002             :   DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
    1003             :   // Ensure that no objects will be allocated on this page.
    1004             :   DCHECK_EQ(0u, AvailableInFreeList());
    1005             : 
    1006             :   size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
    1007      182839 :                             MemoryAllocator::GetCommitPageSize());
    1008      182839 :   if (unused > 0) {
    1009             :     DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    1010      182819 :     if (FLAG_trace_gc_verbose) {
    1011             :       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
    1012             :                    reinterpret_cast<void*>(this),
    1013             :                    reinterpret_cast<void*>(area_end()),
    1014           0 :                    reinterpret_cast<void*>(area_end() - unused));
    1015             :     }
    1016             :     heap()->CreateFillerObjectAt(
    1017             :         filler->address(),
    1018             :         static_cast<int>(area_end() - filler->address() - unused),
    1019      548457 :         ClearRecordedSlots::kNo);
    1020             :     heap()->memory_allocator()->PartialFreeMemory(
    1021      548457 :         this, address() + size() - unused, unused, area_end() - unused);
    1022      182819 :     if (filler->address() != area_end()) {
    1023      182819 :       CHECK(filler->IsFiller());
    1024      182819 :       CHECK_EQ(filler->address() + filler->Size(), area_end());
    1025             :     }
    1026             :   }
    1027      182839 :   return unused;
    1028             : }
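
ShrinkToHighWaterMark can only return whole commit pages to the OS, so it rounds the unused tail down to commit-page granularity and keeps the sub-page remainder covered by a filler object. A standalone sketch of that arithmetic, with illustrative sizes standing in for the real page geometry:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t commit_page_size = 4096;   // assumed commit page size
      const size_t area_size = 256 * 1024;    // assumed usable page area
      const size_t high_water_mark = 10000;   // offset of the last live byte
      const size_t tail = area_size - high_water_mark;
      const size_t unused = tail - (tail % commit_page_size);  // RoundDown
      std::printf("released: %zu bytes, filler still covers: %zu bytes\n",
                  unused, tail - unused);
    }
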
    1029             : 
    1030      169032 : void Page::CreateBlackArea(Address start, Address end) {
    1031             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1032             :   DCHECK_EQ(Page::FromAddress(start), this);
    1033             :   DCHECK_NE(start, end);
    1034             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
    1035             :   IncrementalMarking::MarkingState* marking_state =
    1036      169032 :       heap()->incremental_marking()->marking_state();
    1037             :   marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
    1038      338064 :                                         AddressToMarkbitIndex(end));
    1039      169032 :   marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
    1040      169032 : }
    1041             : 
    1042       13724 : void Page::DestroyBlackArea(Address start, Address end) {
    1043             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1044             :   DCHECK_EQ(Page::FromAddress(start), this);
    1045             :   DCHECK_NE(start, end);
    1046             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
    1047             :   IncrementalMarking::MarkingState* marking_state =
    1048       13724 :       heap()->incremental_marking()->marking_state();
    1049             :   marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
    1050       27448 :                                           AddressToMarkbitIndex(end));
    1051       13724 :   marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
    1052       13724 : }
    1053             : 
    1054      182872 : void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
    1055             :                                         size_t bytes_to_free,
    1056             :                                         Address new_area_end) {
    1057      182872 :   VirtualMemory* reservation = chunk->reserved_memory();
    1058             :   DCHECK(reservation->IsReserved());
    1059      182872 :   chunk->size_ -= bytes_to_free;
    1060      182872 :   chunk->area_end_ = new_area_end;
    1061      182872 :   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    1062             :     // Add guard page at the end.
    1063       60938 :     size_t page_size = GetCommitPageSize();
    1064             :     DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
    1065             :     DCHECK_EQ(chunk->address() + chunk->size(),
    1066             :               chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
    1067             :     reservation->SetPermissions(chunk->area_end_, page_size,
    1068       60938 :                                 PageAllocator::kNoAccess);
    1069             :   }
     1070             :   // On e.g. Windows, a reservation may be larger than a page, and releasing
     1071             :   // it partially, starting at |start_free|, also releases the potentially
     1072             :   // unused part behind the current page.
    1073      182872 :   const size_t released_bytes = reservation->Release(start_free);
    1074             :   DCHECK_GE(size_, released_bytes);
    1075             :   size_ -= released_bytes;
    1076             :   isolate_->counters()->memory_allocated()->Decrement(
    1077      365744 :       static_cast<int>(released_bytes));
    1078      182872 : }
    1079             : 
    1080      767822 : void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
    1081             :   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
    1082      706788 :   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
    1083             : 
    1084             :   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
    1085      706788 :                                          chunk->IsEvacuationCandidate());
    1086             : 
    1087             :   VirtualMemory* reservation = chunk->reserved_memory();
    1088             :   const size_t size =
    1089      706789 :       reservation->IsReserved() ? reservation->size() : chunk->size();
    1090             :   DCHECK_GE(size_, static_cast<size_t>(size));
    1091             :   size_ -= size;
    1092     1413578 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
    1093      706789 :   if (chunk->executable() == EXECUTABLE) {
    1094             :     DCHECK_GE(size_executable_, size);
    1095             :     size_executable_ -= size;
    1096             :   }
    1097             : 
    1098             :   chunk->SetFlag(MemoryChunk::PRE_FREED);
    1099             : 
    1100      706789 :   if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
    1101      706789 : }
    1102             : 
    1103             : 
    1104      819985 : void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
    1105             :   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
    1106      697917 :   chunk->ReleaseAllocatedMemory();
    1107             : 
    1108      697911 :   VirtualMemory* reservation = chunk->reserved_memory();
    1109      697911 :   if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    1110      206880 :     UncommitMemory(reservation);
    1111             :   } else {
    1112      491031 :     if (reservation->IsReserved()) {
    1113      429997 :       reservation->Free();
    1114             :     } else {
     1115             :       // Only read-only pages can have an uninitialized reservation object.
    1116             :       DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
    1117             :       FreeMemory(page_allocator(chunk->executable()), chunk->address(),
    1118       61034 :                  chunk->size());
    1119             :     }
    1120             :   }
    1121      697914 : }
    1122             : 
    1123             : template <MemoryAllocator::FreeMode mode>
    1124      894670 : void MemoryAllocator::Free(MemoryChunk* chunk) {
    1125             :   switch (mode) {
    1126             :     case kFull:
    1127      470394 :       PreFreeMemory(chunk);
    1128      470395 :       PerformFreeMemory(chunk);
    1129             :       break;
    1130             :     case kAlreadyPooled:
    1131             :       // Pooled pages cannot be touched anymore as their memory is uncommitted.
     1132             :       // Pooled pages are non-executable.
    1133      187882 :       FreeMemory(data_page_allocator(), chunk->address(),
    1134             :                  static_cast<size_t>(MemoryChunk::kPageSize));
    1135             :       break;
    1136             :     case kPooledAndQueue:
    1137             :       DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
    1138             :       DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
    1139             :       chunk->SetFlag(MemoryChunk::POOLED);
    1140             :       V8_FALLTHROUGH;
    1141             :     case kPreFreeAndQueue:
    1142      236394 :       PreFreeMemory(chunk);
    1143             :       // The chunks added to this queue will be freed by a concurrent thread.
    1144      236394 :       unmapper()->AddMemoryChunkSafe(chunk);
    1145             :       break;
    1146             :   }
    1147      894671 : }
    1148             : 
    1149             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1150             :     MemoryAllocator::kFull>(MemoryChunk* chunk);
    1151             : 
    1152             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1153             :     MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
    1154             : 
    1155             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1156             :     MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
    1157             : 
    1158             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1159             :     MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
    1160             : 
    1161             : template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
    1162      644830 : Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
    1163             :                                     Executability executable) {
    1164             :   MemoryChunk* chunk = nullptr;
    1165             :   if (alloc_mode == kPooled) {
    1166             :     DCHECK_EQ(size, static_cast<size_t>(
    1167             :                         MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    1168             :                             owner->identity())));
    1169             :     DCHECK_EQ(executable, NOT_EXECUTABLE);
    1170      216320 :     chunk = AllocatePagePooled(owner);
    1171             :   }
    1172      216320 :   if (chunk == nullptr) {
    1173      616960 :     chunk = AllocateChunk(size, size, executable, owner);
    1174             :   }
    1175      644831 :   if (chunk == nullptr) return nullptr;
    1176      644831 :   return owner->InitializePage(chunk, executable);
    1177             : }
    1178             : 
    1179             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1180             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    1181             :         size_t size, PagedSpace* owner, Executability executable);
    1182             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1183             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    1184             :         size_t size, SemiSpace* owner, Executability executable);
    1185             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1186             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    1187             :         size_t size, SemiSpace* owner, Executability executable);
    1188             : 
    1189       61048 : LargePage* MemoryAllocator::AllocateLargePage(size_t size,
    1190             :                                               LargeObjectSpace* owner,
    1191             :                                               Executability executable) {
    1192       61048 :   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
    1193       61048 :   if (chunk == nullptr) return nullptr;
    1194       61048 :   return LargePage::Initialize(isolate_->heap(), chunk, executable);
    1195             : }
    1196             : 
    1197             : template <typename SpaceType>
    1198      244190 : MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
    1199      216320 :   MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
    1200      216320 :   if (chunk == nullptr) return nullptr;
    1201             :   const int size = MemoryChunk::kPageSize;
    1202       27870 :   const Address start = reinterpret_cast<Address>(chunk);
    1203             :   const Address area_start =
    1204             :       start +
    1205       55740 :       MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
    1206       27870 :   const Address area_end = start + size;
    1207             :   // Pooled pages are always regular data pages.
    1208             :   DCHECK_NE(CODE_SPACE, owner->identity());
    1209             :   VirtualMemory reservation(data_page_allocator(), start, size);
    1210       27870 :   if (!CommitMemory(&reservation)) return nullptr;
    1211             :   if (Heap::ShouldZapGarbage()) {
    1212             :     ZapBlock(start, size, kZapValue);
    1213             :   }
    1214       27870 :   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
    1215       55740 :                           NOT_EXECUTABLE, owner, std::move(reservation));
    1216             :   size_ += size;
    1217       27870 :   return chunk;
    1218             : }
    1219             : 
    1220           0 : void MemoryAllocator::ZapBlock(Address start, size_t size,
    1221             :                                uintptr_t zap_value) {
    1222             :   DCHECK(IsAligned(start, kTaggedSize));
    1223             :   DCHECK(IsAligned(size, kTaggedSize));
    1224             :   MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
    1225           0 :                size >> kTaggedSizeLog2);
    1226           0 : }
    1227             : 
    1228           5 : intptr_t MemoryAllocator::GetCommitPageSize() {
    1229    33979509 :   if (FLAG_v8_os_page_size != 0) {
    1230             :     DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
    1231        2753 :     return FLAG_v8_os_page_size * KB;
    1232             :   } else {
    1233    33976756 :     return CommitPageSize();
    1234             :   }
    1235             : }
    1236             : 
    1237     7343183 : base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
    1238             :                                                               size_t size) {
    1239     7351231 :   size_t page_size = MemoryAllocator::GetCommitPageSize();
    1240     7351231 :   if (size < page_size + FreeSpace::kSize) {
    1241     7287433 :     return base::AddressRegion(0, 0);
    1242             :   }
    1243       63798 :   Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
    1244       63798 :   Address discardable_end = RoundDown(addr + size, page_size);
    1245       63798 :   if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
    1246             :   return base::AddressRegion(discardable_start,
    1247       54716 :                              discardable_end - discardable_start);
    1248             : }
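
ComputeDiscardMemoryArea keeps the first FreeSpace::kSize bytes mapped (the free-space header must remain readable), rounds the start up and the end down to commit-page boundaries, and reports whatever whole pages remain. A standalone sketch with stand-in values for the header size and the inputs:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t page = 4096;
      const size_t kHeader = 24;     // stand-in for FreeSpace::kSize
      const size_t addr = 0x10010;   // free block start
      const size_t size = 3 * page;  // free block length
      const size_t start = ((addr + kHeader + page - 1) / page) * page;  // RoundUp
      const size_t end = ((addr + size) / page) * page;                  // RoundDown
      if (start < end) {
        std::printf("discardable: [%#zx, %#zx), %zu bytes\n", start, end, end - start);
      } else {
        std::printf("block too small: nothing page-aligned to discard\n");
      }
    }
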
    1249             : 
    1250      133757 : bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
    1251             :                                              size_t commit_size,
    1252             :                                              size_t reserved_size) {
    1253      133757 :   const size_t page_size = GetCommitPageSize();
    1254             :   // All addresses and sizes must be aligned to the commit page size.
    1255             :   DCHECK(IsAligned(start, page_size));
    1256             :   DCHECK_EQ(0, commit_size % page_size);
    1257             :   DCHECK_EQ(0, reserved_size % page_size);
    1258             :   const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
    1259      133757 :   const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
    1260             :   const size_t code_area_offset =
    1261      133757 :       MemoryChunkLayout::ObjectStartOffsetInCodePage();
    1262             :   // reserved_size includes two guard regions, commit_size does not.
    1263             :   DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
    1264      133757 :   const Address pre_guard_page = start + pre_guard_offset;
    1265      133757 :   const Address code_area = start + code_area_offset;
    1266      133757 :   const Address post_guard_page = start + reserved_size - guard_size;
    1267             :   // Commit the non-executable header, from start to pre-code guard page.
    1268      133757 :   if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    1269             :     // Create the pre-code guard page, following the header.
    1270      133757 :     if (vm->SetPermissions(pre_guard_page, page_size,
    1271             :                            PageAllocator::kNoAccess)) {
    1272             :       // Commit the executable code body.
    1273      133757 :       if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
    1274      133757 :                              PageAllocator::kReadWrite)) {
    1275             :         // Create the post-code guard page.
    1276      133757 :         if (vm->SetPermissions(post_guard_page, page_size,
    1277             :                                PageAllocator::kNoAccess)) {
    1278      133757 :           UpdateAllocatedSpaceLimits(start, code_area + commit_size);
    1279      133757 :           return true;
    1280             :         }
    1281           0 :         vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
    1282             :       }
    1283             :     }
    1284           0 :     vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
    1285             :   }
    1286             :   return false;
    1287             : }
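
The nested SetPermissions calls above carve one reservation into four regions: a read-write header, a no-access pre-guard page, the committed code body, and a no-access post-guard page at the end, so a runaway write off either end of the code area faults immediately. A standalone sketch of the offset arithmetic, with illustrative sizes standing in for MemoryChunkLayout's constants:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t page = 4096;
      const size_t pre_guard_offset = page;  // assumed header size
      const size_t guard_size = page;
      const size_t reserved_size = 64 * page;
      const size_t code_area = pre_guard_offset + guard_size;
      const size_t post_guard = reserved_size - guard_size;
      std::printf("header     [%#zx, %#zx) rw\n", size_t{0}, pre_guard_offset);
      std::printf("pre-guard  [%#zx, %#zx) no access\n", pre_guard_offset, code_area);
      std::printf("code body  [%#zx, %#zx) rw, later rx\n", code_area, post_guard);
      std::printf("post-guard [%#zx, %#zx) no access\n", post_guard, reserved_size);
    }
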
    1288             : 
    1289             : 
    1290             : // -----------------------------------------------------------------------------
    1291             : // MemoryChunk implementation
    1292             : 
    1293     1413575 : void MemoryChunk::ReleaseAllocatedMemory() {
    1294      706788 :   if (skip_list_ != nullptr) {
    1295       89275 :     delete skip_list_;
    1296       89275 :     skip_list_ = nullptr;
    1297             :   }
    1298      706788 :   if (mutex_ != nullptr) {
    1299      645751 :     delete mutex_;
    1300      645753 :     mutex_ = nullptr;
    1301             :   }
    1302      706790 :   if (page_protection_change_mutex_ != nullptr) {
    1303      706787 :     delete page_protection_change_mutex_;
    1304      706787 :     page_protection_change_mutex_ = nullptr;
    1305             :   }
    1306      706790 :   ReleaseSlotSet<OLD_TO_NEW>();
    1307      706783 :   ReleaseSlotSet<OLD_TO_OLD>();
    1308      706780 :   ReleaseTypedSlotSet<OLD_TO_NEW>();
    1309      706782 :   ReleaseTypedSlotSet<OLD_TO_OLD>();
    1310      706782 :   ReleaseInvalidatedSlots();
    1311      706780 :   if (local_tracker_ != nullptr) ReleaseLocalTracker();
    1312      706787 :   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
    1313      706787 :   if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
    1314             : 
    1315      706787 :   if (!IsLargePage()) {
    1316             :     Page* page = static_cast<Page*>(this);
    1317      645739 :     page->ReleaseFreeListCategories();
    1318             :   }
    1319      706784 : }
    1320             : 
    1321       93884 : static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
    1322       93884 :   size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
    1323             :   DCHECK_LT(0, pages);
    1324       93884 :   SlotSet* slot_set = new SlotSet[pages];
    1325      189914 :   for (size_t i = 0; i < pages; i++) {
    1326       96028 :     slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
    1327             :   }
    1328       93886 :   return slot_set;
    1329             : }
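
The `(size + Page::kPageSize - 1) / Page::kPageSize` expression above is the usual ceiling-division idiom, yielding one SlotSet per page the chunk spans. A one-function standalone equivalent:

    #include <cstddef>

    // ceil(size / page) for page > 0; assumes size + page - 1 does not overflow.
    inline size_t PagesSpanned(size_t size, size_t page) {
      return (size + page - 1) / page;
    }
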
    1330             : 
    1331             : template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
    1332             : template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
    1333             : 
    1334             : template <RememberedSetType type>
    1335       93883 : SlotSet* MemoryChunk::AllocateSlotSet() {
    1336       93883 :   SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
    1337             :   SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
    1338       93886 :       &slot_set_[type], nullptr, slot_set);
    1339       93886 :   if (old_slot_set != nullptr) {
    1340          62 :     delete[] slot_set;
    1341             :     slot_set = old_slot_set;
    1342             :   }
    1343             :   DCHECK(slot_set);
    1344       93886 :   return slot_set;
    1345             : }
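
AllocateSlotSet uses the allocate-then-publish pattern: every racing thread builds a candidate slot set, exactly one wins the compare-and-swap, and losers delete their candidate and adopt the published one. A standalone sketch of the same pattern using std::atomic in place of V8's AsAtomicPointer (SlotSetStub is a stand-in type):

    #include <atomic>

    struct SlotSetStub {};  // stand-in for SlotSet

    SlotSetStub* GetOrCreate(std::atomic<SlotSetStub*>& slot) {
      SlotSetStub* candidate = new SlotSetStub();
      SlotSetStub* expected = nullptr;
      if (!slot.compare_exchange_strong(expected, candidate)) {
        delete candidate;  // another thread published first
        return expected;   // compare_exchange loaded the winner into |expected|
      }
      return candidate;
    }
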
    1346             : 
    1347             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
    1348             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
    1349             : 
    1350             : template <RememberedSetType type>
    1351     1432135 : void MemoryChunk::ReleaseSlotSet() {
    1352     1432135 :   SlotSet* slot_set = slot_set_[type];
    1353     1432135 :   if (slot_set) {
    1354       93816 :     slot_set_[type] = nullptr;
    1355       93816 :     delete[] slot_set;
    1356             :   }
    1357     1432144 : }
    1358             : 
    1359             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
    1360             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
    1361             : 
    1362             : template <RememberedSetType type>
    1363        9317 : TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
    1364        9317 :   TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
    1365             :   TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
    1366        9317 :       &typed_slot_set_[type], nullptr, typed_slot_set);
    1367        9317 :   if (old_value != nullptr) {
    1368           0 :     delete typed_slot_set;
    1369             :     typed_slot_set = old_value;
    1370             :   }
    1371             :   DCHECK(typed_slot_set);
    1372        9317 :   return typed_slot_set;
    1373             : }
    1374             : 
    1375             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
    1376             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
    1377             : 
    1378             : template <RememberedSetType type>
    1379     1416685 : void MemoryChunk::ReleaseTypedSlotSet() {
    1380     1416685 :   TypedSlotSet* typed_slot_set = typed_slot_set_[type];
    1381     1416685 :   if (typed_slot_set) {
    1382        9317 :     typed_slot_set_[type] = nullptr;
    1383        9317 :     delete typed_slot_set;
    1384             :   }
    1385     1416685 : }
    1386             : 
    1387         126 : InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
    1388             :   DCHECK_NULL(invalidated_slots_);
    1389         252 :   invalidated_slots_ = new InvalidatedSlots();
    1390         126 :   return invalidated_slots_;
    1391             : }
    1392             : 
    1393      707522 : void MemoryChunk::ReleaseInvalidatedSlots() {
    1394      707522 :   if (invalidated_slots_) {
    1395         252 :     delete invalidated_slots_;
    1396         126 :     invalidated_slots_ = nullptr;
    1397             :   }
    1398      707522 : }
    1399             : 
    1400       48371 : void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
    1401      114063 :                                                      int size) {
    1402       48371 :   if (!ShouldSkipEvacuationSlotRecording()) {
    1403       38021 :     if (invalidated_slots() == nullptr) {
    1404         126 :       AllocateInvalidatedSlots();
    1405             :     }
    1406       38021 :     int old_size = (*invalidated_slots())[object];
    1407       76042 :     (*invalidated_slots())[object] = std::max(old_size, size);
    1408             :   }
    1409       48371 : }
    1410             : 
    1411           0 : bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
    1412           0 :   if (ShouldSkipEvacuationSlotRecording()) {
    1413             :     // Invalidated slots do not matter if we are not recording slots.
    1414             :     return true;
    1415             :   }
    1416           0 :   if (invalidated_slots() == nullptr) {
    1417             :     return false;
    1418             :   }
    1419             :   return invalidated_slots()->find(object) != invalidated_slots()->end();
    1420             : }
    1421             : 
    1422           5 : void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
    1423           5 :                                                  HeapObject new_start) {
    1424             :   DCHECK_LT(old_start, new_start);
    1425             :   DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
    1426             :             MemoryChunk::FromHeapObject(new_start));
    1427          10 :   if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
    1428             :     auto it = invalidated_slots()->find(old_start);
    1429           0 :     if (it != invalidated_slots()->end()) {
    1430           0 :       int old_size = it->second;
    1431           0 :       int delta = static_cast<int>(new_start->address() - old_start->address());
    1432             :       invalidated_slots()->erase(it);
    1433           0 :       (*invalidated_slots())[new_start] = old_size - delta;
    1434             :     }
    1435             :   }
    1436           5 : }
    1437             : 
    1438      229813 : void MemoryChunk::ReleaseLocalTracker() {
    1439             :   DCHECK_NOT_NULL(local_tracker_);
    1440      229813 :   delete local_tracker_;
    1441      229816 :   local_tracker_ = nullptr;
    1442      229816 : }
    1443             : 
    1444           0 : void MemoryChunk::AllocateYoungGenerationBitmap() {
    1445             :   DCHECK_NULL(young_generation_bitmap_);
    1446           0 :   young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1447           0 : }
    1448             : 
    1449           0 : void MemoryChunk::ReleaseYoungGenerationBitmap() {
    1450             :   DCHECK_NOT_NULL(young_generation_bitmap_);
    1451           0 :   free(young_generation_bitmap_);
    1452           0 :   young_generation_bitmap_ = nullptr;
    1453           0 : }
    1454             : 
    1455           0 : void MemoryChunk::AllocateMarkingBitmap() {
    1456             :   DCHECK_NULL(marking_bitmap_);
    1457      706879 :   marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1458           0 : }
    1459             : 
    1460           0 : void MemoryChunk::ReleaseMarkingBitmap() {
    1461             :   DCHECK_NOT_NULL(marking_bitmap_);
    1462      706787 :   free(marking_bitmap_);
    1463      706787 :   marking_bitmap_ = nullptr;
    1464           0 : }
    1465             : 
    1466             : // -----------------------------------------------------------------------------
    1467             : // PagedSpace implementation
    1468             : 
    1469           0 : void Space::CheckOffsetsAreConsistent() const {
    1470             :   static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
    1471             :                 "ID offset inconsistent");
    1472             :   DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
    1473           0 : }
    1474             : 
    1475      304850 : void Space::AddAllocationObserver(AllocationObserver* observer) {
    1476      304850 :   allocation_observers_.push_back(observer);
    1477      304850 :   StartNextInlineAllocationStep();
    1478      304850 : }
    1479             : 
    1480      260467 : void Space::RemoveAllocationObserver(AllocationObserver* observer) {
    1481             :   auto it = std::find(allocation_observers_.begin(),
    1482      260467 :                       allocation_observers_.end(), observer);
    1483             :   DCHECK(allocation_observers_.end() != it);
    1484      260467 :   allocation_observers_.erase(it);
    1485      260467 :   StartNextInlineAllocationStep();
    1486      260467 : }
    1487             : 
    1488      792416 : void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
    1489             : 
    1490      297156 : void Space::ResumeAllocationObservers() {
    1491      792416 :   allocation_observers_paused_ = false;
    1492      297156 : }
    1493             : 
    1494   120378451 : void Space::AllocationStep(int bytes_since_last, Address soon_object,
    1495    66430495 :                            int size) {
    1496   120378451 :   if (!AllocationObserversActive()) {
    1497   120378449 :     return;
    1498             :   }
    1499             : 
    1500             :   DCHECK(!heap()->allocation_step_in_progress());
    1501             :   heap()->set_allocation_step_in_progress(true);
    1502    22143499 :   heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
    1503    66655324 :   for (AllocationObserver* observer : allocation_observers_) {
    1504    22368343 :     observer->AllocationStep(bytes_since_last, soon_object, size);
    1505             :   }
    1506             :   heap()->set_allocation_step_in_progress(false);
    1507             : }
    1508             : 
    1509           0 : intptr_t Space::GetNextInlineAllocationStepSize() {
    1510             :   intptr_t next_step = 0;
    1511    88629227 :   for (AllocationObserver* observer : allocation_observers_) {
    1512             :     next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
    1513    22208321 :                           : observer->bytes_to_next_step();
    1514             :   }
    1515             :   DCHECK(allocation_observers_.size() == 0 || next_step > 0);
    1516           0 :   return next_step;
    1517             : }
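
These two hooks implement inline-allocation sampling: AllocationStep tells every observer how many bytes were allocated since its last step, and GetNextInlineAllocationStepSize sizes the next observable window as the minimum of the observers' requested steps. A standalone sketch of the min computation, with illustrative names:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct ObserverStub {
      intptr_t bytes_to_next_step;
    };

    intptr_t NextStepSize(const std::vector<ObserverStub>& observers) {
      intptr_t next = 0;
      for (const ObserverStub& o : observers) {
        next = next ? std::min(next, o.bytes_to_next_step) : o.bytes_to_next_step;
      }
      return next;  // 0 only when there are no observers
    }
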
    1518             : 
    1519      474415 : PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
    1520             :                        Executability executable)
    1521     1423245 :     : SpaceWithLinearArea(heap, space), executable_(executable) {
    1522      474415 :   area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
    1523             :   accounting_stats_.Clear();
    1524      474415 : }
    1525             : 
    1526      474355 : void PagedSpace::TearDown() {
    1527     1362130 :   while (!memory_chunk_list_.Empty()) {
    1528             :     MemoryChunk* chunk = memory_chunk_list_.front();
    1529      413420 :     memory_chunk_list_.Remove(chunk);
    1530      826840 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
    1531             :   }
    1532             :   accounting_stats_.Clear();
    1533      474355 : }
    1534             : 
    1535      353314 : void PagedSpace::RefillFreeList() {
     1536             :   // Any PagedSpace might invoke RefillFreeList. We filter out all but the
     1537             :   // old generation spaces.
    1538      882605 :   if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
    1539      353314 :       identity() != MAP_SPACE && identity() != RO_SPACE) {
    1540      353484 :     return;
    1541             :   }
    1542      825198 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    1543             :   size_t added = 0;
    1544             :   {
    1545      478296 :     Page* p = nullptr;
    1546     1178533 :     while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
     1547             :       // Pages can actually change ownership only during compaction. This is
     1548             :       // safe because no other action competes for the page links during
     1549             :       // compaction.
    1550      478296 :       if (is_local()) {
    1551             :         DCHECK_NE(this, p->owner());
    1552             :         PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
    1553       39620 :         base::MutexGuard guard(owner->mutex());
    1554       39620 :         owner->RefineAllocatedBytesAfterSweeping(p);
    1555       39620 :         owner->RemovePage(p);
    1556       39620 :         added += AddPage(p);
    1557             :       } else {
    1558      438676 :         base::MutexGuard guard(mutex());
    1559             :         DCHECK_EQ(this, p->owner());
    1560      438676 :         RefineAllocatedBytesAfterSweeping(p);
    1561      438676 :         added += RelinkFreeListCategories(p);
    1562             :       }
    1563      478296 :       added += p->wasted_memory();
    1564      478296 :       if (is_local() && (added > kCompactionMemoryWanted)) break;
    1565             :     }
    1566             :   }
    1567             : }
    1568             : 
    1569      230209 : void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
    1570      230209 :   base::MutexGuard guard(mutex());
    1571             : 
    1572             :   DCHECK(identity() == other->identity());
    1573             :   // Unmerged fields:
    1574             :   //   area_size_
    1575      230209 :   other->FreeLinearAllocationArea();
    1576             : 
    1577             :   // The linear allocation area of {other} should be destroyed now.
    1578             :   DCHECK_EQ(kNullAddress, other->top());
    1579             :   DCHECK_EQ(kNullAddress, other->limit());
    1580             : 
    1581             :   // Move over pages.
    1582      315890 :   for (auto it = other->begin(); it != other->end();) {
    1583             :     Page* p = *(it++);
    1584             :     // Relinking requires the category to be unlinked.
    1585       85681 :     other->RemovePage(p);
    1586       85681 :     AddPage(p);
    1587             :     DCHECK_EQ(p->AvailableInFreeList(),
    1588             :               p->AvailableInFreeListFromAllocatedBytes());
    1589             :   }
    1590             :   DCHECK_EQ(0u, other->Size());
    1591             :   DCHECK_EQ(0u, other->Capacity());
    1592      230209 : }
    1593             : 
    1594             : 
    1595        1004 : size_t PagedSpace::CommittedPhysicalMemory() {
    1596        1004 :   if (!base::OS::HasLazyCommits()) return CommittedMemory();
    1597        1004 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1598             :   size_t size = 0;
    1599        2729 :   for (Page* page : *this) {
    1600        1725 :     size += page->CommittedPhysicalMemory();
    1601             :   }
    1602             :   return size;
    1603             : }
    1604             : 
    1605          20 : bool PagedSpace::ContainsSlow(Address addr) {
    1606             :   Page* p = Page::FromAddress(addr);
    1607         285 :   for (Page* page : *this) {
    1608         280 :     if (page == p) return true;
    1609             :   }
    1610             :   return false;
    1611             : }
    1612             : 
    1613     1434888 : void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
    1614      478296 :   CHECK(page->SweepingDone());
    1615             :   auto marking_state =
    1616       11845 :       heap()->incremental_marking()->non_atomic_marking_state();
     1617             :   // The live bytes on the page were accounted in the space's allocated
     1618             :   // bytes counter. After sweeping, allocated_bytes() contains the
     1619             :   // accurate live byte count on the page.
    1620      478296 :   size_t old_counter = marking_state->live_bytes(page);
    1621             :   size_t new_counter = page->allocated_bytes();
    1622             :   DCHECK_GE(old_counter, new_counter);
    1623      478296 :   if (old_counter > new_counter) {
    1624       11845 :     DecreaseAllocatedBytes(old_counter - new_counter, page);
    1625             :     // Give the heap a chance to adjust counters in response to the
    1626             :     // more precise and smaller old generation size.
    1627             :     heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
    1628             :   }
    1629             :   marking_state->SetLiveBytes(page, 0);
    1630      478296 : }
    1631             : 
    1632       25596 : Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
    1633       25596 :   base::MutexGuard guard(mutex());
    1634             :   // Check for pages that still contain free list entries. Bail out for smaller
    1635             :   // categories.
    1636             :   const int minimum_category =
    1637       51240 :       static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
    1638             :   Page* page = free_list()->GetPageForCategoryType(kHuge);
    1639       25620 :   if (!page && static_cast<int>(kLarge) >= minimum_category)
    1640             :     page = free_list()->GetPageForCategoryType(kLarge);
    1641       25620 :   if (!page && static_cast<int>(kMedium) >= minimum_category)
    1642             :     page = free_list()->GetPageForCategoryType(kMedium);
    1643       25620 :   if (!page && static_cast<int>(kSmall) >= minimum_category)
    1644             :     page = free_list()->GetPageForCategoryType(kSmall);
    1645       25620 :   if (!page && static_cast<int>(kTiny) >= minimum_category)
    1646             :     page = free_list()->GetPageForCategoryType(kTiny);
    1647       25620 :   if (!page && static_cast<int>(kTiniest) >= minimum_category)
    1648             :     page = free_list()->GetPageForCategoryType(kTiniest);
    1649       25620 :   if (!page) return nullptr;
    1650       18949 :   RemovePage(page);
    1651       18949 :   return page;
    1652             : }
    1653             : 
    1654     1146673 : size_t PagedSpace::AddPage(Page* page) {
    1655     1146672 :   CHECK(page->SweepingDone());
    1656      573336 :   page->set_owner(this);
    1657             :   memory_chunk_list_.PushBack(page);
    1658             :   AccountCommitted(page->size());
    1659             :   IncreaseCapacity(page->area_size());
    1660             :   IncreaseAllocatedBytes(page->allocated_bytes(), page);
    1661     1720011 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    1662     1146674 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    1663     1146674 :     IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    1664             :   }
    1665      573337 :   return RelinkFreeListCategories(page);
    1666             : }
    1667             : 
    1668      288498 : void PagedSpace::RemovePage(Page* page) {
    1669      288498 :   CHECK(page->SweepingDone());
    1670      144249 :   memory_chunk_list_.Remove(page);
    1671             :   UnlinkFreeListCategories(page);
    1672             :   DecreaseAllocatedBytes(page->allocated_bytes(), page);
    1673             :   DecreaseCapacity(page->area_size());
    1674             :   AccountUncommitted(page->size());
    1675      432749 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    1676      288499 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    1677      288499 :     DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    1678             :   }
    1679      144250 : }
    1680             : 
    1681      182844 : size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
    1682      182844 :   size_t unused = page->ShrinkToHighWaterMark();
    1683             :   accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    1684             :   AccountUncommitted(unused);
    1685      182844 :   return unused;
    1686             : }
    1687             : 
    1688         400 : void PagedSpace::ResetFreeList() {
    1689      368229 :   for (Page* page : *this) {
    1690      185015 :     free_list_.EvictFreeListItems(page);
    1691             :   }
    1692             :   DCHECK(free_list_.IsEmpty());
    1693         400 : }
    1694             : 
    1695      182814 : void PagedSpace::ShrinkImmortalImmovablePages() {
    1696             :   DCHECK(!heap()->deserialization_complete());
    1697      182814 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1698      182814 :   FreeLinearAllocationArea();
    1699             :   ResetFreeList();
    1700      365638 :   for (Page* page : *this) {
    1701             :     DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    1702      182824 :     ShrinkPageToHighWaterMark(page);
    1703             :   }
    1704      182814 : }
    1705             : 
    1706     1285582 : bool PagedSpace::Expand() {
    1707             :   // Always lock against the main space as we can only adjust capacity and
    1708             :   // pages concurrently for the main paged space.
    1709     2571119 :   base::MutexGuard guard(heap()->paged_space(identity())->mutex());
    1710             : 
    1711             :   const int size = AreaSize();
    1712             : 
    1713      857088 :   if (!heap()->CanExpandOldGeneration(size)) return false;
    1714             : 
    1715             :   Page* page =
    1716      428498 :       heap()->memory_allocator()->AllocatePage(size, this, executable());
    1717      428498 :   if (page == nullptr) return false;
    1718             :   // Pages created during bootstrapping may contain immortal immovable objects.
    1719      428498 :   if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
    1720      428498 :   AddPage(page);
    1721             :   Free(page->area_start(), page->area_size(),
    1722      428499 :        SpaceAccountingMode::kSpaceAccounted);
    1723      428499 :   heap()->NotifyOldGenerationExpansion();
    1724      428499 :   return true;
    1725             : }
    1726             : 
    1727             : 
    1728      159673 : int PagedSpace::CountTotalPages() {
    1729             :   int count = 0;
    1730      495247 :   for (Page* page : *this) {
    1731      335574 :     count++;
    1732             :     USE(page);
    1733             :   }
    1734      159673 :   return count;
    1735             : }
    1736             : 
    1737             : 
    1738      223530 : void PagedSpace::ResetFreeListStatistics() {
    1739      717025 :   for (Page* page : *this) {
    1740             :     page->ResetFreeListStatistics();
    1741             :   }
    1742      223530 : }
    1743             : 
    1744     1231827 : void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
    1745             :   SetTopAndLimit(top, limit);
    1746     2463663 :   if (top != kNullAddress && top != limit &&
    1747     2463664 :       heap()->incremental_marking()->black_allocation()) {
    1748      143844 :     Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
    1749             :   }
    1750     1231831 : }
    1751             : 
    1752    21856758 : void PagedSpace::DecreaseLimit(Address new_limit) {
    1753             :   Address old_limit = limit();
    1754             :   DCHECK_LE(top(), new_limit);
    1755             :   DCHECK_GE(old_limit, new_limit);
    1756    21856758 :   if (new_limit != old_limit) {
    1757             :     SetTopAndLimit(top(), new_limit);
    1758             :     Free(new_limit, old_limit - new_limit,
    1759       41149 :          SpaceAccountingMode::kSpaceAccounted);
    1760       82298 :     if (heap()->incremental_marking()->black_allocation()) {
    1761             :       Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
    1762       12335 :                                                                    old_limit);
    1763             :     }
    1764             :   }
    1765    21856758 : }
    1766             : 
    1767    23764571 : Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
    1768             :                                           size_t min_size) {
    1769             :   DCHECK_GE(end - start, min_size);
    1770             : 
    1771    45768835 :   if (heap()->inline_allocation_disabled()) {
    1772             :     // Fit the requested area exactly.
    1773      311893 :     return start + min_size;
    1774    45940493 :   } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
     1775             :     // Generated code may allocate inline from the linear allocation area.
    1776             :     // To make sure we can observe these allocations, we use a lower limit.
    1777    22004264 :     size_t step = GetNextInlineAllocationStepSize();
    1778             : 
     1779             :     // TODO(ofrobots): there is a subtle difference between old space and new
    1780             :     // space here. Any way to avoid it? `step - 1` makes more sense as we would
    1781             :     // like to sample the object that straddles the `start + step` boundary.
    1782             :     // Rounding down further would introduce a small statistical error in
    1783             :     // sampling. However, presently PagedSpace requires limit to be aligned.
    1784             :     size_t rounded_step;
    1785    22004264 :     if (identity() == NEW_SPACE) {
    1786             :       DCHECK_GE(step, 1);
    1787      439791 :       rounded_step = step - 1;
    1788             :     } else {
    1789    21564473 :       rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
    1790             :     }
    1791    44008530 :     return Min(static_cast<Address>(start + min_size + rounded_step), end);
    1792             :   } else {
    1793             :     // The entire node can be used as the linear allocation area.
    1794             :     return end;
    1795             :   }
    1796             : }
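
ComputeLimit therefore has three outcomes: an exact fit when inline allocation is disabled, a window of min_size plus the (rounded) observer step when observers are active, and the whole node otherwise. A standalone sketch of that policy, with the space-specific step rounding abstracted away:

    #include <algorithm>
    #include <cstdint>

    uintptr_t ComputeLimitSketch(uintptr_t start, uintptr_t end, size_t min_size,
                                 bool inline_alloc_disabled, bool observers_active,
                                 size_t rounded_step) {
      if (inline_alloc_disabled) return start + min_size;  // fit exactly
      if (observers_active) {                              // lower, observable limit
        return std::min<uintptr_t>(start + min_size + rounded_step, end);
      }
      return end;  // use the entire node
    }
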
    1797             : 
    1798       82203 : void PagedSpace::MarkLinearAllocationAreaBlack() {
    1799             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1800             :   Address current_top = top();
    1801             :   Address current_limit = limit();
    1802       82203 :   if (current_top != kNullAddress && current_top != current_limit) {
    1803             :     Page::FromAllocationAreaAddress(current_top)
    1804       24577 :         ->CreateBlackArea(current_top, current_limit);
    1805             :   }
    1806       82203 : }
    1807             : 
    1808        2367 : void PagedSpace::UnmarkLinearAllocationArea() {
    1809             :   Address current_top = top();
    1810             :   Address current_limit = limit();
    1811        2367 :   if (current_top != kNullAddress && current_top != current_limit) {
    1812             :     Page::FromAllocationAreaAddress(current_top)
    1813        1389 :         ->DestroyBlackArea(current_top, current_limit);
    1814             :   }
    1815        2367 : }
    1816             : 
    1817     2525844 : void PagedSpace::FreeLinearAllocationArea() {
    1818             :   // Mark the old linear allocation area with a free space map so it can be
    1819             :   // skipped when scanning the heap.
    1820             :   Address current_top = top();
    1821             :   Address current_limit = limit();
    1822     2525844 :   if (current_top == kNullAddress) {
    1823             :     DCHECK_EQ(kNullAddress, current_limit);
    1824     2525775 :     return;
    1825             :   }
    1826             : 
    1827     3425879 :   if (heap()->incremental_marking()->black_allocation()) {
    1828      110501 :     Page* page = Page::FromAllocationAreaAddress(current_top);
    1829             : 
    1830             :     // Clear the bits in the unused black area.
    1831      155995 :     if (current_top != current_limit) {
    1832             :       IncrementalMarking::MarkingState* marking_state =
    1833             :           heap()->incremental_marking()->marking_state();
    1834             :       marking_state->bitmap(page)->ClearRange(
    1835             :           page->AddressToMarkbitIndex(current_top),
    1836      221002 :           page->AddressToMarkbitIndex(current_limit));
    1837             :       marking_state->IncrementLiveBytes(
    1838      110501 :           page, -static_cast<int>(current_limit - current_top));
    1839             :     }
    1840             :   }
    1841             : 
    1842     1113410 :   InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
    1843             :   SetTopAndLimit(kNullAddress, kNullAddress);
    1844             :   DCHECK_GE(current_limit, current_top);
    1845             : 
    1846             :   // The code page of the linear allocation area needs to be unprotected
    1847             :   // because we are going to write a filler into that memory area below.
    1848     1113350 :   if (identity() == CODE_SPACE) {
    1849             :     heap()->UnprotectAndRegisterMemoryChunk(
    1850       85709 :         MemoryChunk::FromAddress(current_top));
    1851             :   }
    1852             :   Free(current_top, current_limit - current_top,
    1853     1113350 :        SpaceAccountingMode::kSpaceAccounted);
    1854             : }
    1855             : 
    1856       15617 : void PagedSpace::ReleasePage(Page* page) {
    1857             :   DCHECK_EQ(
    1858             :       0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
    1859             :              page));
    1860             :   DCHECK_EQ(page->owner(), this);
    1861             : 
    1862       15617 :   free_list_.EvictFreeListItems(page);
    1863             :   DCHECK(!free_list_.ContainsPageFreeListItems(page));
    1864             : 
    1865       31234 :   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    1866             :     DCHECK(!top_on_previous_step_);
    1867             :     allocation_info_.Reset(kNullAddress, kNullAddress);
    1868             :   }
    1869             : 
    1870       31234 :   AccountUncommitted(page->size());
    1871             :   accounting_stats_.DecreaseCapacity(page->area_size());
    1872       15617 :   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    1873       15617 : }
    1874             : 
    1875       15067 : void PagedSpace::SetReadable() {
    1876             :   DCHECK(identity() == CODE_SPACE);
    1877       30270 :   for (Page* page : *this) {
    1878       30406 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1879       15203 :     page->SetReadable();
    1880             :   }
    1881       15067 : }
    1882             : 
    1883      405550 : void PagedSpace::SetReadAndExecutable() {
    1884             :   DCHECK(identity() == CODE_SPACE);
    1885     1114132 :   for (Page* page : *this) {
    1886     1417162 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1887      708582 :     page->SetReadAndExecutable();
    1888             :   }
    1889      405552 : }
    1890             : 
    1891      420617 : void PagedSpace::SetReadAndWritable() {
    1892             :   DCHECK(identity() == CODE_SPACE);
    1893     1082914 :   for (Page* page : *this) {
    1894     1324592 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1895      662297 :     page->SetReadAndWritable();
    1896             :   }
    1897      420619 : }
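
SetReadable, SetReadAndExecutable, and SetReadAndWritable flip code-space pages between the writable and executable states so that no page is ever both at once (W^X). A standalone, POSIX-only sketch of the underlying permission change, using mprotect directly rather than V8's page allocator:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdlib>

    void MakeWritable(void* page, size_t size) {
      if (mprotect(page, size, PROT_READ | PROT_WRITE) != 0) std::abort();
    }

    void MakeExecutable(void* page, size_t size) {
      if (mprotect(page, size, PROT_READ | PROT_EXEC) != 0) std::abort();
    }
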
    1898             : 
    1899       30340 : std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
    1900       30340 :   return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
    1901             : }
    1902             : 
    1903     1797026 : bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
    1904             :   DCHECK(IsAligned(size_in_bytes, kTaggedSize));
    1905             :   DCHECK_LE(top(), limit());
    1906             : #ifdef DEBUG
    1907             :   if (top() != limit()) {
    1908             :     DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
    1909             :   }
    1910             : #endif
    1911             :   // Don't free list allocate if there is linear space available.
    1912             :   DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
    1913             : 
    1914             :   // Mark the old linear allocation area with a free space map so it can be
    1915             :   // skipped when scanning the heap.  This also puts it back in the free list
    1916             :   // if it is big enough.
    1917     1797026 :   FreeLinearAllocationArea();
    1918             : 
    1919     1797000 :   if (!is_local()) {
    1920             :     heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    1921             :         heap()->GCFlagsForIncrementalMarking(),
    1922     3256312 :         kGCCallbackScheduleIdleGarbageCollection);
    1923             :   }
    1924             : 
    1925     1796924 :   size_t new_node_size = 0;
    1926     1796924 :   FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
    1927     1796872 :   if (new_node.is_null()) return false;
    1928             : 
    1929             :   DCHECK_GE(new_node_size, size_in_bytes);
    1930             : 
    1931             :   // The old-space-step might have finished sweeping and restarted marking.
    1932             :   // Verify that it did not turn the page of the new node into an evacuation
    1933             :   // candidate.
    1934             :   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
    1935             : 
    1936             :   // Memory in the linear allocation area is counted as allocated.  We may free
    1937             :   // a little of this again immediately; see below.
    1938             :   Page* page = Page::FromHeapObject(new_node);
    1939     1231799 :   IncreaseAllocatedBytes(new_node_size, page);
    1940             : 
    1941             :   Address start = new_node->address();
    1942     1231799 :   Address end = new_node->address() + new_node_size;
    1943     1231799 :   Address limit = ComputeLimit(start, end, size_in_bytes);
    1944             :   DCHECK_LE(limit, end);
    1945             :   DCHECK_LE(size_in_bytes, limit - start);
    1946     1231833 :   if (limit != end) {
    1947      237686 :     if (identity() == CODE_SPACE) {
    1948        2161 :       heap()->UnprotectAndRegisterMemoryChunk(page);
    1949             :     }
    1950      237686 :     Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
    1951             :   }
    1952     1231833 :   SetLinearAllocationArea(start, limit);
    1953             : 
    1954     1231829 :   return true;
    1955             : }
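                     :
                     : // A minimal sketch of the refill idea above, using hypothetical FreeNode and
                     : // Arena types (not V8 API): unlink one node that fits from a free list, turn
                     : // it into the linear [top, limit) area, and bump-allocate inside it. V8's
                     : // free list is segregated by size category rather than one first-fit list.
                     :
                     : #include <cstddef>
                     : #include <cstdint>
                     :
                     : struct FreeNode { FreeNode* next; size_t size; };  // hypothetical
                     :
                     : struct Arena {
                     :   FreeNode* free_list = nullptr;
                     :   uintptr_t top = 0, limit = 0;
                     :
                     :   void* Allocate(size_t bytes) {
                     :     if (top + bytes > limit && !RefillFromFreeList(bytes)) return nullptr;
                     :     void* result = reinterpret_cast<void*>(top);
                     :     top += bytes;  // bump-pointer fast path
                     :     return result;
                     :   }
                     :
                     :   bool RefillFromFreeList(size_t bytes) {
                     :     for (FreeNode** p = &free_list; *p != nullptr; p = &(*p)->next) {
                     :       if ((*p)->size >= bytes) {
                     :         FreeNode* node = *p;
                     :         *p = node->next;  // unlink the first node that fits
                     :         top = reinterpret_cast<uintptr_t>(node);
                     :         limit = top + node->size;  // V8 may cap this and free the tail
                     :         return true;
                     :       }
                     :     }
                     :     return false;
                     :   }
                     : };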
    1956             : 
    1957             : #ifdef DEBUG
    1958             : void PagedSpace::Print() {}
    1959             : #endif
    1960             : 
    1961             : #ifdef VERIFY_HEAP
    1962             : void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
    1963             :   bool allocation_pointer_found_in_space =
    1964             :       (allocation_info_.top() == allocation_info_.limit());
    1965             :   size_t external_space_bytes[kNumTypes];
    1966             :   size_t external_page_bytes[kNumTypes];
    1967             : 
    1968             :   for (int i = 0; i < kNumTypes; i++) {
    1969             :     external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    1970             :   }
    1971             : 
    1972             :   for (Page* page : *this) {
    1973             :     CHECK(page->owner() == this);
    1974             : 
    1975             :     for (int i = 0; i < kNumTypes; i++) {
    1976             :       external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    1977             :     }
    1978             : 
    1979             :     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
    1980             :       allocation_pointer_found_in_space = true;
    1981             :     }
    1982             :     CHECK(page->SweepingDone());
    1983             :     HeapObjectIterator it(page);
    1984             :     Address end_of_previous_object = page->area_start();
    1985             :     Address top = page->area_end();
    1986             : 
    1987             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    1988             :       CHECK(end_of_previous_object <= object->address());
    1989             : 
    1990             :       // The first word should be a map, and we expect all map pointers to
    1991             :       // be in map space.
    1992             :       Map map = object->map();
    1993             :       CHECK(map->IsMap());
    1994             :       CHECK(heap()->map_space()->Contains(map) ||
    1995             :             heap()->read_only_space()->Contains(map));
    1996             : 
    1997             :       // Perform space-specific object verification.
    1998             :       VerifyObject(object);
    1999             : 
    2000             :       // The object itself should look OK.
    2001             :       object->ObjectVerify(isolate);
    2002             : 
    2003             :       if (!FLAG_verify_heap_skip_remembered_set) {
    2004             :         heap()->VerifyRememberedSetFor(object);
    2005             :       }
    2006             : 
    2007             :       // All the interior pointers should be contained in the heap.
    2008             :       int size = object->Size();
    2009             :       object->IterateBody(map, size, visitor);
    2010             :       CHECK(object->address() + size <= top);
    2011             :       end_of_previous_object = object->address() + size;
    2012             : 
    2013             :       if (object->IsExternalString()) {
    2014             :         ExternalString external_string = ExternalString::cast(object);
    2015             :         size_t size = external_string->ExternalPayloadSize();
    2016             :         external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
    2017             :       } else if (object->IsJSArrayBuffer()) {
    2018             :         JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
    2019             :         if (ArrayBufferTracker::IsTracked(array_buffer)) {
    2020             :           size_t size = array_buffer->byte_length();
    2021             :           external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
    2022             :         }
    2023             :       }
    2024             :     }
    2025             :     for (int i = 0; i < kNumTypes; i++) {
    2026             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2027             :       CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
    2028             :       external_space_bytes[t] += external_page_bytes[t];
    2029             :     }
    2030             :   }
    2031             :   for (int i = 0; i < kNumTypes; i++) {
    2032             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2033             :     CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
    2034             :   }
    2035             :   CHECK(allocation_pointer_found_in_space);
    2036             : #ifdef DEBUG
    2037             :   VerifyCountersAfterSweeping();
    2038             : #endif
    2039             : }
    2040             : 
    2041             : void PagedSpace::VerifyLiveBytes() {
    2042             :   IncrementalMarking::MarkingState* marking_state =
    2043             :       heap()->incremental_marking()->marking_state();
    2044             :   for (Page* page : *this) {
    2045             :     CHECK(page->SweepingDone());
    2046             :     HeapObjectIterator it(page);
    2047             :     int black_size = 0;
    2048             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    2049             :       // All the interior pointers should be contained in the heap.
    2050             :       if (marking_state->IsBlack(object)) {
    2051             :         black_size += object->Size();
    2052             :       }
    2053             :     }
    2054             :     CHECK_LE(black_size, marking_state->live_bytes(page));
    2055             :   }
    2056             : }
    2057             : #endif  // VERIFY_HEAP
    2058             : 
    2059             : #ifdef DEBUG
    2060             : void PagedSpace::VerifyCountersAfterSweeping() {
    2061             :   size_t total_capacity = 0;
    2062             :   size_t total_allocated = 0;
    2063             :   for (Page* page : *this) {
    2064             :     DCHECK(page->SweepingDone());
    2065             :     total_capacity += page->area_size();
    2066             :     HeapObjectIterator it(page);
    2067             :     size_t real_allocated = 0;
    2068             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    2069             :       if (!object->IsFiller()) {
    2070             :         real_allocated += object->Size();
    2071             :       }
    2072             :     }
    2073             :     total_allocated += page->allocated_bytes();
    2074             :     // The real size can be smaller than the accounted size if array trimming
    2075             :     // or object slack tracking happened after sweeping.
    2076             :     DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    2077             :     DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
    2078             :   }
    2079             :   DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
    2080             :   DCHECK_EQ(total_allocated, accounting_stats_.Size());
    2081             : }
    2082             : 
    2083             : void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
    2084             :   // We need to refine the counters on pages that are already swept and have
    2085             :   // not been moved over to the actual space. Otherwise, the AccountingStats
    2086             :   // are just an over-approximation.
    2087             :   RefillFreeList();
    2088             : 
    2089             :   size_t total_capacity = 0;
    2090             :   size_t total_allocated = 0;
    2091             :   auto marking_state =
    2092             :       heap()->incremental_marking()->non_atomic_marking_state();
    2093             :   for (Page* page : *this) {
    2094             :     size_t page_allocated =
    2095             :         page->SweepingDone()
    2096             :             ? page->allocated_bytes()
    2097             :             : static_cast<size_t>(marking_state->live_bytes(page));
    2098             :     total_capacity += page->area_size();
    2099             :     total_allocated += page_allocated;
    2100             :     DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
    2101             :   }
    2102             :   DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
    2103             :   DCHECK_EQ(total_allocated, accounting_stats_.Size());
    2104             : }
    2105             : #endif
    2106             : 
    2107             : // -----------------------------------------------------------------------------
    2108             : // NewSpace implementation
    2109             : 
    2110       61054 : NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
    2111             :                    size_t initial_semispace_capacity,
    2112             :                    size_t max_semispace_capacity)
    2113             :     : SpaceWithLinearArea(heap, NEW_SPACE),
    2114             :       to_space_(heap, kToSpace),
    2115      122108 :       from_space_(heap, kFromSpace) {
    2116             :   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
    2117             :   DCHECK(
    2118             :       base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
    2119             : 
    2120             :   to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
    2121             :   from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
    2122       61054 :   if (!to_space_.Commit()) {
    2123           0 :     V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
    2124             :   }
    2125             :   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
    2126       61054 :   ResetLinearAllocationArea();
    2127       61054 : }
    2128             : 
    2129       61044 : void NewSpace::TearDown() {
    2130             :   allocation_info_.Reset(kNullAddress, kNullAddress);
    2131             : 
    2132       61044 :   to_space_.TearDown();
    2133       61044 :   from_space_.TearDown();
    2134       61044 : }
    2135             : 
    2136       98000 : void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
    2137             : 
    2138             : 
    2139        1951 : void NewSpace::Grow() {
    2140             :   // Double the semispace size but only up to maximum capacity.
    2141             :   DCHECK(TotalCapacity() < MaximumCapacity());
    2142             :   size_t new_capacity =
    2143             :       Min(MaximumCapacity(),
    2144        3902 :           static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
    2145        1951 :   if (to_space_.GrowTo(new_capacity)) {
    2146             :     // Only grow from space if we managed to grow to-space.
    2147        1951 :     if (!from_space_.GrowTo(new_capacity)) {
    2148             :       // If we managed to grow to-space but couldn't grow from-space,
    2149             :       // attempt to shrink to-space.
    2150           0 :       if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
    2151             :         // We are in an inconsistent state because we could not
    2152             :         // commit/uncommit memory from new space.
    2153           0 :         FATAL("inconsistent state");
    2154             :       }
    2155             :     }
    2156             :   }
    2157             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2158        1951 : }
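                     :
                     : // A sketch of the capacity computation above: grow geometrically, capped at
                     : // the maximum. In the code the factor and cap come from
                     : // FLAG_semi_space_growth_factor and MaximumCapacity(); here they are plain
                     : // parameters.
                     :
                     : #include <algorithm>
                     : #include <cstddef>
                     :
                     : size_t NextSemiSpaceCapacity(size_t current, size_t maximum, size_t factor) {
                     :   return std::min(maximum, factor * current);
                     : }
                     : // E.g. with factor 2: 512 KB -> 1 MB -> 2 MB -> ... -> maximum.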
    2159             : 
    2160             : 
    2161       24484 : void NewSpace::Shrink() {
    2162       24484 :   size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
    2163             :   size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
    2164       24609 :   if (rounded_new_capacity < TotalCapacity() &&
    2165         125 :       to_space_.ShrinkTo(rounded_new_capacity)) {
    2166             :     // Only shrink from-space if we managed to shrink to-space.
    2167           0 :     from_space_.Reset();
    2168         125 :     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
    2169             :       // If we managed to shrink to-space but couldn't shrink from
    2170             :       // space, attempt to grow to-space again.
    2171           0 :       if (!to_space_.GrowTo(from_space_.current_capacity())) {
    2172             :         // We are in an inconsistent state because we could not
    2173             :         // commit/uncommit memory from new space.
    2174           0 :         FATAL("inconsistent state");
    2175             :       }
    2176             :     }
    2177             :   }
    2178             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2179       24484 : }
    2180             : 
    2181       74510 : bool NewSpace::Rebalance() {
    2182             :   // Order here is important to make use of the page pool.
    2183      149020 :   return to_space_.EnsureCurrentCapacity() &&
    2184      149020 :          from_space_.EnsureCurrentCapacity();
    2185             : }
    2186             : 
    2187      149020 : bool SemiSpace::EnsureCurrentCapacity() {
    2188      149020 :   if (is_committed()) {
    2189             :     const int expected_pages =
    2190      149020 :         static_cast<int>(current_capacity_ / Page::kPageSize);
    2191             :     MemoryChunk* current_page = first_page();
    2192             :     int actual_pages = 0;
    2193             : 
    2194             :     // First iterate through the page list, counting up to expected_pages if
    2195             :     // that many pages exist.
    2196      687625 :     while (current_page != nullptr && actual_pages < expected_pages) {
    2197      389585 :       actual_pages++;
    2198      389585 :       current_page = current_page->list_node().next();
    2199             :     }
    2200             : 
    2201             :     // Free all over-allocated pages that follow current_page.
    2202      150799 :     while (current_page) {
    2203        1779 :       MemoryChunk* next_current = current_page->list_node().next();
    2204        1779 :       memory_chunk_list_.Remove(current_page);
    2205             :       // Clear new space flags to avoid this page being treated as a new
    2206             :       // space page that is potentially being swept.
    2207             :       current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
    2208             :       heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
    2209        8292 :           current_page);
    2210             :       current_page = next_current;
    2211             :     }
    2212             : 
    2213             :     // Add more pages if we have less than expected_pages.
    2214             :     IncrementalMarking::NonAtomicMarkingState* marking_state =
    2215             :         heap()->incremental_marking()->non_atomic_marking_state();
    2216      151387 :     while (actual_pages < expected_pages) {
    2217        2367 :       actual_pages++;
    2218             :       current_page =
    2219             :           heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2220             :               MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2221        2367 :               NOT_EXECUTABLE);
    2222        2367 :       if (current_page == nullptr) return false;
    2223             :       DCHECK_NOT_NULL(current_page);
    2224             :       memory_chunk_list_.PushBack(current_page);
    2225             :       marking_state->ClearLiveness(current_page);
    2226             :       current_page->SetFlags(first_page()->GetFlags(),
    2227        2367 :                              static_cast<uintptr_t>(Page::kCopyAllFlags));
    2228             :       heap()->CreateFillerObjectAt(current_page->area_start(),
    2229             :                                    static_cast<int>(current_page->area_size()),
    2230        4734 :                                    ClearRecordedSlots::kNo);
    2231             :     }
    2232             :   }
    2233             :   return true;
    2234             : }
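                     :
                     : // EnsureCurrentCapacity above resizes the page list to an expected count:
                     : // walk to the expected length, free the surplus tail, then append fresh
                     : : // pages. A compact sketch of the same shape with a std::list and
                     : // caller-supplied allocate/release hooks (hypothetical, not V8's allocator):
                     :
                     : #include <list>
                     :
                     : struct Page {};  // hypothetical stand-in
                     :
                     : bool EnsureCapacity(std::list<Page*>& pages, int expected,
                     :                     Page* (*allocate)(), void (*release)(Page*)) {
                     :   while (static_cast<int>(pages.size()) > expected) {
                     :     release(pages.back());  // free over-allocated pages at the tail
                     :     pages.pop_back();
                     :   }
                     :   while (static_cast<int>(pages.size()) < expected) {
                     :     Page* p = allocate();
                     :     if (p == nullptr) return false;  // report partial success as failure
                     :     pages.push_back(p);
                     :   }
                     :   return true;
                     : }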
    2235             : 
    2236     1084509 : LinearAllocationArea LocalAllocationBuffer::Close() {
    2237     1084509 :   if (IsValid()) {
    2238             :     heap_->CreateFillerObjectAt(
    2239             :         allocation_info_.top(),
    2240       95623 :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    2241       95623 :         ClearRecordedSlots::kNo);
    2242       95623 :     const LinearAllocationArea old_info = allocation_info_;
    2243       95623 :     allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    2244       95623 :     return old_info;
    2245             :   }
    2246      988886 :   return LinearAllocationArea(kNullAddress, kNullAddress);
    2247             : }
    2248             : 
    2249      386625 : LocalAllocationBuffer::LocalAllocationBuffer(
    2250             :     Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
    2251             :     : heap_(heap),
    2252      386625 :       allocation_info_(allocation_info) {
    2253      386625 :   if (IsValid()) {
    2254             :     heap_->CreateFillerObjectAt(
    2255             :         allocation_info_.top(),
    2256      194030 :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    2257      194030 :         ClearRecordedSlots::kNo);
    2258             :   }
    2259      386620 : }
    2260             : 
    2261      194183 : LocalAllocationBuffer::LocalAllocationBuffer(const LocalAllocationBuffer& other)
    2262             :     V8_NOEXCEPT {
    2263             :   *this = other;
    2264      194179 : }
    2265             : 
    2266      194441 : LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    2267             :     const LocalAllocationBuffer& other) V8_NOEXCEPT {
    2268      388624 :   Close();
    2269      388621 :   heap_ = other.heap_;
    2270      388621 :   allocation_info_ = other.allocation_info_;
    2271             : 
    2272             :   // This is needed since we (a) cannot yet use move-semantics, (b) want to
    2273             :   // make the class easy to use by value, and (c) implicitly call {Close}
    2274             :   // upon copy.
    2275             :   const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
    2276             :       kNullAddress, kNullAddress);
    2277      194442 :   return *this;
    2278             : }
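                     :
                     : // A sketch of the "copy closes" convention documented above, with a
                     : // hypothetical Buffer type: assigning over a live buffer first closes it,
                     : // and the source is reset so at most one object owns the area at a time.
                     :
                     : struct Buffer {
                     :   void* top = nullptr;
                     :   void* limit = nullptr;
                     :   bool IsValid() const { return top != nullptr; }
                     :   void Close() { top = limit = nullptr; }  // V8 also pads the unused tail
                     :
                     :   Buffer() = default;
                     :   Buffer(const Buffer& other) { *this = other; }
                     :   Buffer& operator=(const Buffer& other) {
                     :     Close();  // the destination gives up its old area first
                     :     top = other.top;
                     :     limit = other.limit;
                     :     // Reset the source (as the const_cast above does) so ownership moves.
                     :     const_cast<Buffer&>(other).top = nullptr;
                     :     const_cast<Buffer&>(other).limit = nullptr;
                     :     return *this;
                     :   }
                     : };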
    2279             : 
    2280      221965 : void NewSpace::UpdateLinearAllocationArea() {
    2281             :   // Make sure there are no unaccounted allocations.
    2282             :   DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
    2283             : 
    2284      443930 :   Address new_top = to_space_.page_low();
    2285      221965 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2286             :   allocation_info_.Reset(new_top, to_space_.page_high());
    2287             :   // The order of the following two stores is important.
    2288             :   // See the corresponding loads in ConcurrentMarking::Run.
    2289             :   original_limit_.store(limit(), std::memory_order_relaxed);
    2290             :   original_top_.store(top(), std::memory_order_release);
    2291      221965 :   StartNextInlineAllocationStep();
    2292             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2293      221965 : }
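                     :
                     : // The two stores above form a release/acquire publication protocol: the
                     : // limit is stored first (relaxed) and the top last (release), so a reader
                     : // that acquire-loads the top also sees the limit written before it. A
                     : // self-contained sketch of the pairing (variable names are ours):
                     :
                     : #include <atomic>
                     : #include <cstdint>
                     :
                     : std::atomic<uintptr_t> published_limit{0};
                     : std::atomic<uintptr_t> published_top{0};
                     :
                     : void Publish(uintptr_t top, uintptr_t limit) {
                     :   published_limit.store(limit, std::memory_order_relaxed);
                     :   published_top.store(top, std::memory_order_release);
                     : }
                     :
                     : void Read(uintptr_t* top, uintptr_t* limit) {
                     :   // The acquire load synchronizes with the release store, so the limit
                     :   // written before that top is visible (a newer one may be visible too).
                     :   *top = published_top.load(std::memory_order_acquire);
                     :   *limit = published_limit.load(std::memory_order_relaxed);
                     : }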
    2294             : 
    2295      159054 : void NewSpace::ResetLinearAllocationArea() {
    2296             :   // Do a step to account for memory allocated so far before resetting.
    2297      159054 :   InlineAllocationStep(top(), top(), kNullAddress, 0);
    2298             :   to_space_.Reset();
    2299      159054 :   UpdateLinearAllocationArea();
    2300             :   // Clear all mark-bits in the to-space.
    2301             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2302      406498 :       heap()->incremental_marking()->non_atomic_marking_state();
    2303      565552 :   for (Page* p : to_space_) {
    2304             :     marking_state->ClearLiveness(p);
    2305             :     // Concurrent marking may have local live bytes for this page.
    2306      406498 :     heap()->concurrent_marking()->ClearMemoryChunkData(p);
    2307             :   }
    2308      159054 : }
    2309             : 
    2310      675980 : void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
    2311     1351960 :   Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
    2312             :   allocation_info_.set_limit(new_limit);
    2313             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2314      675980 : }
    2315             : 
    2316    21856741 : void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
    2317    21856741 :   Address new_limit = ComputeLimit(top(), limit(), min_size);
    2318             :   DCHECK_LE(new_limit, limit());
    2319    21856759 :   DecreaseLimit(new_limit);
    2320    21856760 : }
    2321             : 
    2322       83014 : bool NewSpace::AddFreshPage() {
    2323       83014 :   Address top = allocation_info_.top();
    2324             :   DCHECK(!OldSpace::IsAtPageStart(top));
    2325             : 
    2326             :   // Do a step to account for memory allocated on previous page.
    2327       83014 :   InlineAllocationStep(top, top, kNullAddress, 0);
    2328             : 
    2329       83014 :   if (!to_space_.AdvancePage()) {
    2330             :     // No more pages left to advance.
    2331             :     return false;
    2332             :   }
    2333             : 
    2334             :   // Clear remainder of current page.
    2335       62911 :   Address limit = Page::FromAllocationAreaAddress(top)->area_end();
    2336       62911 :   int remaining_in_page = static_cast<int>(limit - top);
    2337       62911 :   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
    2338       62911 :   UpdateLinearAllocationArea();
    2339             : 
    2340       62911 :   return true;
    2341             : }
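                     :
                     : // AddFreshPage pads the unused tail of the old page with a filler before
                     : // switching pages, so a linear heap walk never hits an unparsable gap. A
                     : // sketch of the invariant under a deliberately simplified layout in which
                     : // every object, filler included, starts with a size word (not V8's layout):
                     :
                     : #include <cstddef>
                     :
                     : // Tag the gap [start, start + size_in_bytes) as a pseudo-object.
                     : void CreateFiller(void* start, size_t size_in_bytes) {
                     :   *static_cast<size_t*>(start) = size_in_bytes;
                     : }
                     :
                     : // A heap walk then steps over objects and fillers alike by their size word.
                     : void* NextObject(void* current) {
                     :   return static_cast<char*>(current) + *static_cast<size_t*>(current);
                     : }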
    2342             : 
    2343             : 
    2344           0 : bool NewSpace::AddFreshPageSynchronized() {
    2345           0 :   base::MutexGuard guard(&mutex_);
    2346           0 :   return AddFreshPage();
    2347             : }
    2348             : 
    2349             : 
    2350      372592 : bool NewSpace::EnsureAllocation(int size_in_bytes,
    2351             :                                 AllocationAlignment alignment) {
    2352      787622 :   Address old_top = allocation_info_.top();
    2353      435076 :   Address high = to_space_.page_high();
    2354      372592 :   int filler_size = Heap::GetFillToAlign(old_top, alignment);
    2355      372592 :   int aligned_size_in_bytes = size_in_bytes + filler_size;
    2356             : 
    2357      372592 :   if (old_top + aligned_size_in_bytes > high) {
    2358             :     // Not enough room in the page, try to allocate a new one.
    2359       82530 :     if (!AddFreshPage()) {
    2360             :       return false;
    2361             :     }
    2362             : 
    2363             :     old_top = allocation_info_.top();
    2364             :     high = to_space_.page_high();
    2365       62484 :     filler_size = Heap::GetFillToAlign(old_top, alignment);
    2366             :   }
    2367             : 
    2368             :   DCHECK(old_top + aligned_size_in_bytes <= high);
    2369             : 
    2370      352546 :   if (allocation_info_.limit() < high) {
    2371             :     // The limit has been lowered either because linear allocation was
    2372             :     // disabled, because incremental marking wants a chance to do a step, or
    2373             :     // because the idle scavenge job wants a chance to post a task. Set the
    2374             :     // new limit accordingly.
    2375      314151 :     Address new_top = old_top + aligned_size_in_bytes;
    2376      314151 :     Address soon_object = old_top + filler_size;
    2377      314151 :     InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
    2378      314151 :     UpdateInlineAllocationLimit(aligned_size_in_bytes);
    2379             :   }
    2380             :   return true;
    2381             : }
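                     :
                     : // EnsureAllocation first computes how many filler bytes would align the
                     : // current top. A sketch of that computation for power-of-two alignments;
                     : // the name mirrors the Heap::GetFillToAlign call above, but this
                     : // formulation is ours:
                     :
                     : #include <cstdint>
                     :
                     : int GetFillToAlign(uintptr_t addr, uintptr_t alignment) {
                     :   // Padding that makes addr + fill a multiple of alignment (a power of two).
                     :   return static_cast<int>((alignment - (addr & (alignment - 1))) &
                     :                           (alignment - 1));
                     : }
                     : // Example: addr % 8 == 4 with alignment 8 gives 4; an aligned addr gives 0.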
    2382             : 
    2383       98622 : size_t LargeObjectSpace::Available() {
    2384             :   // We return zero here since we cannot take advantage of already allocated
    2385             :   // large object memory.
    2386       98622 :   return 0;
    2387             : }
    2388             : 
    2389   120634871 : void SpaceWithLinearArea::StartNextInlineAllocationStep() {
    2390   120634871 :   if (heap()->allocation_step_in_progress()) {
    2391             :     // If we are mid-way through an existing step, don't start a new one.
    2392   120634889 :     return;
    2393             :   }
    2394             : 
    2395   120634892 :   if (AllocationObserversActive()) {
    2396    21707066 :     top_on_previous_step_ = top();
    2397    21707066 :     UpdateInlineAllocationLimit(0);
    2398             :   } else {
    2399             :     DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2400             :   }
    2401             : }
    2402             : 
    2403      213433 : void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
    2404      213433 :   InlineAllocationStep(top(), top(), kNullAddress, 0);
    2405      213433 :   Space::AddAllocationObserver(observer);
    2406             :   DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
    2407      213433 : }
    2408             : 
    2409      185688 : void SpaceWithLinearArea::RemoveAllocationObserver(
    2410             :     AllocationObserver* observer) {
    2411             :   Address top_for_next_step =
    2412      371376 :       allocation_observers_.size() == 1 ? kNullAddress : top();
    2413      185688 :   InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
    2414      185688 :   Space::RemoveAllocationObserver(observer);
    2415             :   DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
    2416      185688 : }
    2417             : 
    2418      495260 : void SpaceWithLinearArea::PauseAllocationObservers() {
    2419             :   // Do a step to account for memory allocated so far.
    2420      495260 :   InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
    2421             :   Space::PauseAllocationObservers();
    2422             :   DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2423      495260 :   UpdateInlineAllocationLimit(0);
    2424      495260 : }
    2425             : 
    2426      495260 : void SpaceWithLinearArea::ResumeAllocationObservers() {
    2427             :   DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2428             :   Space::ResumeAllocationObservers();
    2429      495260 :   StartNextInlineAllocationStep();
    2430      495260 : }
    2431             : 
    2432     2563981 : void SpaceWithLinearArea::InlineAllocationStep(Address top,
    2433             :                                                Address top_for_next_step,
    2434             :                                                Address soon_object,
    2435             :                                                size_t size) {
    2436     2563981 :   if (heap()->allocation_step_in_progress()) {
    2437             :     // Avoid starting a new step if we are mid-way through an existing one.
    2438     2563981 :     return;
    2439             :   }
    2440             : 
    2441     2563987 :   if (top_on_previous_step_) {
    2442      798878 :     if (top < top_on_previous_step_) {
    2443             :       // Generated code decreased the top pointer to do folded allocations.
    2444             :       DCHECK_NE(top, kNullAddress);
    2445             :       DCHECK_EQ(Page::FromAllocationAreaAddress(top),
    2446             :                 Page::FromAllocationAreaAddress(top_on_previous_step_));
    2447           0 :       top_on_previous_step_ = top;
    2448             :     }
    2449      798878 :     int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
    2450      798878 :     AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
    2451      798878 :     top_on_previous_step_ = top_for_next_step;
    2452             :   }
    2453             : }
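                     :
                     : // The step machinery above reports the bytes allocated since the last
                     : // recorded top to every allocation observer. A reduced sketch of that
                     : // pattern (the observer interface is simplified; V8 also passes the next
                     : // object's address and size):
                     :
                     : #include <cstdint>
                     : #include <vector>
                     :
                     : struct AllocationObserver {
                     :   virtual void Step(int bytes_allocated) = 0;
                     :   virtual ~AllocationObserver() = default;
                     : };
                     :
                     : struct LinearArea {
                     :   uintptr_t top = 0;
                     :   uintptr_t top_on_previous_step = 0;
                     :   std::vector<AllocationObserver*> observers;
                     :
                     :   // Called at allocation events; reports the delta since the last step and
                     :   // arms the next one.
                     :   void InlineAllocationStep(uintptr_t top_for_next_step) {
                     :     if (top_on_previous_step != 0) {
                     :       int bytes = static_cast<int>(top - top_on_previous_step);
                     :       for (AllocationObserver* o : observers) o->Step(bytes);
                     :       top_on_previous_step = top_for_next_step;
                     :     }
                     :   }
                     : };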
    2454             : 
    2455        7585 : std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
    2456        7585 :   return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
    2457             : }
    2458             : 
    2459             : #ifdef VERIFY_HEAP
    2460             : // We do not use the SemiSpaceIterator because verification cannot assume
    2461             : // that it works (it depends on the very invariants we are checking).
    2462             : void NewSpace::Verify(Isolate* isolate) {
    2463             :   // The allocation pointer should be in the space or at the very end.
    2464             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2465             : 
    2466             :   // There should be objects packed in from the low address up to the
    2467             :   // allocation pointer.
    2468             :   Address current = to_space_.first_page()->area_start();
    2469             :   CHECK_EQ(current, to_space_.space_start());
    2470             : 
    2471             :   size_t external_space_bytes[kNumTypes];
    2472             :   for (int i = 0; i < kNumTypes; i++) {
    2473             :     external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    2474             :   }
    2475             : 
    2476             :   while (current != top()) {
    2477             :     if (!Page::IsAlignedToPageSize(current)) {
    2478             :       // The allocation pointer should not be in the middle of an object.
    2479             :       CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
    2480             :             current < top());
    2481             : 
    2482             :       HeapObject object = HeapObject::FromAddress(current);
    2483             : 
    2484             :       // The first word should be a map, and we expect all map pointers to
    2485             :       // be in map space or read-only space.
    2486             :       Map map = object->map();
    2487             :       CHECK(map->IsMap());
    2488             :       CHECK(heap()->map_space()->Contains(map) ||
    2489             :             heap()->read_only_space()->Contains(map));
    2490             : 
    2491             :       // The object should not be code or a map.
    2492             :       CHECK(!object->IsMap());
    2493             :       CHECK(!object->IsAbstractCode());
    2494             : 
    2495             :       // The object itself should look OK.
    2496             :       object->ObjectVerify(isolate);
    2497             : 
    2498             :       // All the interior pointers should be contained in the heap.
    2499             :       VerifyPointersVisitor visitor(heap());
    2500             :       int size = object->Size();
    2501             :       object->IterateBody(map, size, &visitor);
    2502             : 
    2503             :       if (object->IsExternalString()) {
    2504             :         ExternalString external_string = ExternalString::cast(object);
    2505             :         size_t size = external_string->ExternalPayloadSize();
    2506             :         external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
    2507             :       } else if (object->IsJSArrayBuffer()) {
    2508             :         JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
    2509             :         if (ArrayBufferTracker::IsTracked(array_buffer)) {
    2510             :           size_t size = array_buffer->byte_length();
    2511             :           external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
    2512             :         }
    2513             :       }
    2514             : 
    2515             :       current += size;
    2516             :     } else {
    2517             :       // At end of page, switch to next page.
    2518             :       Page* page = Page::FromAllocationAreaAddress(current)->next_page();
    2519             :       current = page->area_start();
    2520             :     }
    2521             :   }
    2522             : 
    2523             :   for (int i = 0; i < kNumTypes; i++) {
    2524             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2525             :     CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
    2526             :   }
    2527             : 
    2528             :   // Check semi-spaces.
    2529             :   CHECK_EQ(from_space_.id(), kFromSpace);
    2530             :   CHECK_EQ(to_space_.id(), kToSpace);
    2531             :   from_space_.Verify();
    2532             :   to_space_.Verify();
    2533             : }
    2534             : #endif
    2535             : 
    2536             : // -----------------------------------------------------------------------------
    2537             : // SemiSpace implementation
    2538             : 
    2539           0 : void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
    2540             :   DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
    2541      122108 :   minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
    2542      122108 :   current_capacity_ = minimum_capacity_;
    2543      122108 :   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
    2544       61054 :   committed_ = false;
    2545           0 : }
    2546             : 
    2547             : 
    2548      122088 : void SemiSpace::TearDown() {
    2549             :   // Properly uncommit memory to keep the allocator counters in sync.
    2550      122088 :   if (is_committed()) {
    2551       74715 :     Uncommit();
    2552             :   }
    2553      122088 :   current_capacity_ = maximum_capacity_ = 0;
    2554           0 : }
    2555             : 
    2556             : 
    2557       97928 : bool SemiSpace::Commit() {
    2558             :   DCHECK(!is_committed());
    2559       97928 :   const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
    2560      294933 :   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    2561             :     Page* new_page =
    2562             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2563             :             MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2564      394010 :             NOT_EXECUTABLE);
    2565      197005 :     if (new_page == nullptr) {
    2566           0 :       if (pages_added) RewindPages(pages_added);
    2567             :       return false;
    2568             :     }
    2569             :     memory_chunk_list_.PushBack(new_page);
    2570             :   }
    2571             :   Reset();
    2572       97928 :   AccountCommitted(current_capacity_);
    2573       97928 :   if (age_mark_ == kNullAddress) {
    2574       77097 :     age_mark_ = first_page()->area_start();
    2575             :   }
    2576       97928 :   committed_ = true;
    2577       97928 :   return true;
    2578             : }
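                     :
                     : // Commit allocates a fixed number of pages and, on any failure, rewinds the
                     : // pages already added, leaving the space either fully committed or
                     : // untouched. A self-contained sketch of that all-or-nothing pattern
                     : // (hypothetical Page type):
                     :
                     : #include <new>
                     : #include <vector>
                     :
                     : struct Page { char payload[4096]; };
                     :
                     : bool CommitPages(std::vector<Page*>& out, int n) {
                     :   for (int added = 0; added < n; added++) {
                     :     Page* p = new (std::nothrow) Page;
                     :     if (p == nullptr) {
                     :       while (added-- > 0) {  // rewind: free the pages added so far
                     :         delete out.back();
                     :         out.pop_back();
                     :       }
                     :       return false;
                     :     }
                     :     out.push_back(p);
                     :   }
                     :   return true;
                     : }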
    2579             : 
    2580             : 
    2581       97913 : bool SemiSpace::Uncommit() {
    2582             :   DCHECK(is_committed());
    2583      408869 :   while (!memory_chunk_list_.Empty()) {
    2584             :     MemoryChunk* chunk = memory_chunk_list_.front();
    2585      213043 :     memory_chunk_list_.Remove(chunk);
    2586      523999 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
    2587             :   }
    2588       97913 :   current_page_ = nullptr;
    2589       97913 :   AccountUncommitted(current_capacity_);
    2590       97913 :   committed_ = false;
    2591       97913 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2592       97913 :   return true;
    2593             : }
    2594             : 
    2595             : 
    2596         342 : size_t SemiSpace::CommittedPhysicalMemory() {
    2597         342 :   if (!is_committed()) return 0;
    2598             :   size_t size = 0;
    2599        1030 :   for (Page* p : *this) {
    2600         688 :     size += p->CommittedPhysicalMemory();
    2601             :   }
    2602             :   return size;
    2603             : }
    2604             : 
    2605        3902 : bool SemiSpace::GrowTo(size_t new_capacity) {
    2606        3902 :   if (!is_committed()) {
    2607         109 :     if (!Commit()) return false;
    2608             :   }
    2609             :   DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
    2610             :   DCHECK_LE(new_capacity, maximum_capacity_);
    2611             :   DCHECK_GT(new_capacity, current_capacity_);
    2612        3902 :   const size_t delta = new_capacity - current_capacity_;
    2613             :   DCHECK(IsAligned(delta, AllocatePageSize()));
    2614        3902 :   const int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2615             :   DCHECK(last_page());
    2616             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2617       16948 :       heap()->incremental_marking()->non_atomic_marking_state();
    2618       20850 :   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    2619             :     Page* new_page =
    2620             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2621             :             MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2622       16948 :             NOT_EXECUTABLE);
    2623       16948 :     if (new_page == nullptr) {
    2624           0 :       if (pages_added) RewindPages(pages_added);
    2625             :       return false;
    2626             :     }
    2627             :     memory_chunk_list_.PushBack(new_page);
    2628             :     marking_state->ClearLiveness(new_page);
    2629             :     // Duplicate the flags that were set on the old page.
    2630       16948 :     new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
    2631             :   }
    2632             :   AccountCommitted(delta);
    2633        3902 :   current_capacity_ = new_capacity;
    2634        3902 :   return true;
    2635             : }
    2636             : 
    2637         250 : void SemiSpace::RewindPages(int num_pages) {
    2638             :   DCHECK_GT(num_pages, 0);
    2639             :   DCHECK(last_page());
    2640        1380 :   while (num_pages > 0) {
    2641             :     MemoryChunk* last = last_page();
    2642         880 :     memory_chunk_list_.Remove(last);
    2643        1760 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
    2644         880 :     num_pages--;
    2645             :   }
    2646         250 : }
    2647             : 
    2648         250 : bool SemiSpace::ShrinkTo(size_t new_capacity) {
    2649             :   DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
    2650             :   DCHECK_GE(new_capacity, minimum_capacity_);
    2651             :   DCHECK_LT(new_capacity, current_capacity_);
    2652         250 :   if (is_committed()) {
    2653         250 :     const size_t delta = current_capacity_ - new_capacity;
    2654             :     DCHECK(IsAligned(delta, Page::kPageSize));
    2655         250 :     int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2656         250 :     RewindPages(delta_pages);
    2657         250 :     AccountUncommitted(delta);
    2658         250 :     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2659             :   }
    2660         250 :   current_capacity_ = new_capacity;
    2661         250 :   return true;
    2662             : }
    2663             : 
    2664      196000 : void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
    2665      763862 :   for (Page* page : *this) {
    2666      567862 :     page->set_owner(this);
    2667      567862 :     page->SetFlags(flags, mask);
    2668      567862 :     if (id_ == kToSpace) {
    2669             :       page->ClearFlag(MemoryChunk::FROM_PAGE);
    2670             :       page->SetFlag(MemoryChunk::TO_PAGE);
    2671             :       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2672             :       heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
    2673             :           page, 0);
    2674             :     } else {
    2675             :       page->SetFlag(MemoryChunk::FROM_PAGE);
    2676             :       page->ClearFlag(MemoryChunk::TO_PAGE);
    2677             :     }
    2678             :     DCHECK(page->InYoungGeneration());
    2679             :   }
    2680      196000 : }
    2681             : 
    2682             : 
    2683           0 : void SemiSpace::Reset() {
    2684             :   DCHECK(first_page());
    2685             :   DCHECK(last_page());
    2686      257107 :   current_page_ = first_page();
    2687      257107 :   pages_used_ = 0;
    2688           0 : }
    2689             : 
    2690        2367 : void SemiSpace::RemovePage(Page* page) {
    2691        2367 :   if (current_page_ == page) {
    2692         341 :     if (page->prev_page()) {
    2693         336 :       current_page_ = page->prev_page();
    2694             :     }
    2695             :   }
    2696        2367 :   memory_chunk_list_.Remove(page);
    2697        7101 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    2698        4734 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2699        4734 :     DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    2700             :   }
    2701        2367 : }
    2702             : 
    2703        1779 : void SemiSpace::PrependPage(Page* page) {
    2704             :   page->SetFlags(current_page()->GetFlags(),
    2705        1779 :                  static_cast<uintptr_t>(Page::kCopyAllFlags));
    2706        1779 :   page->set_owner(this);
    2707             :   memory_chunk_list_.PushFront(page);
    2708        1779 :   pages_used_++;
    2709        5337 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    2710        3558 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2711        3558 :     IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    2712             :   }
    2713        1779 : }
    2714             : 
    2715       98000 : void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
    2716             :   // We won't be swapping semispaces without data in them.
    2717             :   DCHECK(from->first_page());
    2718             :   DCHECK(to->first_page());
    2719             : 
    2720       98000 :   intptr_t saved_to_space_flags = to->current_page()->GetFlags();
    2721             : 
    2722             :   // We swap all properties but id_.
    2723             :   std::swap(from->current_capacity_, to->current_capacity_);
    2724             :   std::swap(from->maximum_capacity_, to->maximum_capacity_);
    2725             :   std::swap(from->minimum_capacity_, to->minimum_capacity_);
    2726             :   std::swap(from->age_mark_, to->age_mark_);
    2727             :   std::swap(from->committed_, to->committed_);
    2728             :   std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
    2729             :   std::swap(from->current_page_, to->current_page_);
    2730             :   std::swap(from->external_backing_store_bytes_,
    2731             :             to->external_backing_store_bytes_);
    2732             : 
    2733       98000 :   to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
    2734       98000 :   from->FixPagesFlags(0, 0);
    2735       98000 : }
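                     :
                     : // Swap exchanges every property except id_, so "to" and "from" keep their
                     : // labels while the backing pages and counters flip. A reduced sketch with a
                     : // hypothetical type (not the real SemiSpace layout):
                     :
                     : #include <cstddef>
                     : #include <utility>
                     :
                     : struct SemiSpaceLike {
                     :   int id;                      // kToSpace / kFromSpace: never swapped
                     :   size_t capacity = 0;
                     :   void* first_page = nullptr;
                     :
                     :   static void Swap(SemiSpaceLike* a, SemiSpaceLike* b) {
                     :     std::swap(a->capacity, b->capacity);
                     :     std::swap(a->first_page, b->first_page);
                     :     // a->id and b->id deliberately stay put.
                     :   }
                     : };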
    2736             : 
    2737       98000 : void SemiSpace::set_age_mark(Address mark) {
    2738             :   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
    2739       98000 :   age_mark_ = mark;
    2740             :   // Mark all pages up to the one containing mark.
    2741      210372 :   for (Page* p : PageRange(space_start(), mark)) {
    2742             :     p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2743             :   }
    2744       98000 : }
    2745             : 
    2746           0 : std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
    2747             :   // Use NewSpace::GetObjectIterator to iterate the to-space instead.
    2748           0 :   UNREACHABLE();
    2749             : }
    2750             : 
    2751             : #ifdef DEBUG
    2752             : void SemiSpace::Print() {}
    2753             : #endif
    2754             : 
    2755             : #ifdef VERIFY_HEAP
    2756             : void SemiSpace::Verify() {
    2757             :   bool is_from_space = (id_ == kFromSpace);
    2758             :   size_t external_backing_store_bytes[kNumTypes];
    2759             : 
    2760             :   for (int i = 0; i < kNumTypes; i++) {
    2761             :     external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    2762             :   }
    2763             : 
    2764             :   for (Page* page : *this) {
    2765             :     CHECK_EQ(page->owner(), this);
    2766             :     CHECK(page->InNewSpace());
    2767             :     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
    2768             :                                         : MemoryChunk::TO_PAGE));
    2769             :     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
    2770             :                                          : MemoryChunk::FROM_PAGE));
    2771             :     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    2772             :     if (!is_from_space) {
    2773             :       // The pointers-from-here-are-interesting flag isn't updated dynamically
    2774             :       // on from-space pages, so it might be out of sync with the marking state.
    2775             :       if (page->heap()->incremental_marking()->IsMarking()) {
    2776             :         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2777             :       } else {
    2778             :         CHECK(
    2779             :             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2780             :       }
    2781             :     }
    2782             :     for (int i = 0; i < kNumTypes; i++) {
    2783             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2784             :       external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
    2785             :     }
    2786             : 
    2787             :     CHECK_IMPLIES(page->list_node().prev(),
    2788             :                   page->list_node().prev()->list_node().next() == page);
    2789             :   }
    2790             :   for (int i = 0; i < kNumTypes; i++) {
    2791             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2792             :     CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
    2793             :   }
    2794             : }
    2795             : #endif
    2796             : 
    2797             : #ifdef DEBUG
    2798             : void SemiSpace::AssertValidRange(Address start, Address end) {
    2799             :   // Both addresses must belong to the same semi-space.
    2800             :   Page* page = Page::FromAllocationAreaAddress(start);
    2801             :   Page* end_page = Page::FromAllocationAreaAddress(end);
    2802             :   SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
    2803             :   DCHECK_EQ(space, end_page->owner());
    2804             :   // The start address must not come after the end address: either both are
    2805             :   // on the same page, or the end address is on a later page in the linked
    2806             :   // list of semi-space pages.
    2807             :   if (page == end_page) {
    2808             :     DCHECK_LE(start, end);
    2809             :   } else {
    2810             :     while (page != end_page) {
    2811             :       page = page->next_page();
    2812             :     }
    2813             :     DCHECK(page);
    2814             :   }
    2815             : }
    2816             : #endif
    2817             : 
    2818             : 
    2819             : // -----------------------------------------------------------------------------
    2820             : // SemiSpaceIterator implementation.
    2821             : 
    2822        7585 : SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
    2823             :   Initialize(space->first_allocatable_address(), space->top());
    2824           0 : }
    2825             : 
    2826             : 
    2827           0 : void SemiSpaceIterator::Initialize(Address start, Address end) {
    2828             :   SemiSpace::AssertValidRange(start, end);
    2829        7585 :   current_ = start;
    2830        7585 :   limit_ = end;
    2831           0 : }
    2832             : 
    2833         251 : size_t NewSpace::CommittedPhysicalMemory() {
    2834         251 :   if (!base::OS::HasLazyCommits()) return CommittedMemory();
    2835         251 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2836         251 :   size_t size = to_space_.CommittedPhysicalMemory();
    2837         251 :   if (from_space_.is_committed()) {
    2838          91 :     size += from_space_.CommittedPhysicalMemory();
    2839             :   }
    2840         251 :   return size;
    2841             : }
    2842             : 
    2843             : 
    2844             : // -----------------------------------------------------------------------------
    2845             : // Free lists for old object spaces implementation
    2846             : // Free-list implementation for old object spaces
    2847             : 
    2848           0 : void FreeListCategory::Reset() {
    2849             :   set_top(FreeSpace());
    2850             :   set_prev(nullptr);
    2851             :   set_next(nullptr);
    2852     2301697 :   available_ = 0;
    2853           0 : }
    2854             : 
    2855      663457 : FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
    2856             :                                              size_t* node_size) {
    2857             :   DCHECK(page()->CanAllocate());
    2858             :   FreeSpace node = top();
    2859     1262147 :   if (node.is_null() || static_cast<size_t>(node->Size()) < minimum_size) {
    2860       85304 :     *node_size = 0;
    2861       85304 :     return FreeSpace();
    2862             :   }
    2863             :   set_top(node->next());
    2864      578184 :   *node_size = node->Size();
    2865      578184 :   available_ -= *node_size;
    2866      578184 :   return node;
    2867             : }
    2868             : 
    2869      678122 : FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
    2870             :                                                 size_t* node_size) {
    2871             :   DCHECK(page()->CanAllocate());
    2872             :   FreeSpace prev_non_evac_node;
    2873     1356839 :   for (FreeSpace cur_node = top(); !cur_node.is_null();
    2874             :        cur_node = cur_node->next()) {
    2875      654199 :     size_t size = cur_node->size();
    2876      654199 :     if (size >= minimum_size) {
    2877             :       DCHECK_GE(available_, size);
    2878      653604 :       available_ -= size;
    2879      653604 :       if (cur_node == top()) {
    2880             :         set_top(cur_node->next());
    2881             :       }
    2882      653604 :       if (!prev_non_evac_node.is_null()) {
    2883           0 :         MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
    2884           6 :         if (chunk->owner()->identity() == CODE_SPACE) {
    2885           0 :           chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
    2886             :         }
    2887             :         prev_non_evac_node->set_next(cur_node->next());
    2888             :       }
    2889      653604 :       *node_size = size;
    2890      653604 :       return cur_node;
    2891             :     }
    2892             : 
    2893             :     prev_non_evac_node = cur_node;
    2894             :   }
    2895       24518 :   return FreeSpace();
    2896             : }
    2897             : 
    2898    21267234 : void FreeListCategory::Free(Address start, size_t size_in_bytes,
    2899     3739161 :                             FreeMode mode) {
    2900             :   DCHECK(page()->CanAllocate());
    2901             :   FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
    2902             :   free_space->set_next(top());
    2903             :   set_top(free_space);
    2904    21267234 :   available_ += size_in_bytes;
    2905    23873687 :   if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
    2906             :     owner()->AddCategory(this);
    2907             :   }
    2908    21267234 : }
    2909             : 
    2910             : 
    2911       60993 : void FreeListCategory::RepairFreeList(Heap* heap) {
    2912             :   FreeSpace n = top();
    2913      121986 :   while (!n.is_null()) {
    2914             :     MapWordSlot map_location = n.map_slot();
    2915             :     // We can't use .is_null() here because *map_location returns an
    2916             :     // Object (for which "is null" is not defined, as it would be
    2917             :     // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
    2918           0 :     if (*map_location == Map()) {
    2919             :       map_location.store(ReadOnlyRoots(heap).free_space_map());
    2920             :     } else {
    2921             :       DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
    2922             :     }
    2923             :     n = n->next();
    2924             :   }
    2925       60993 : }
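                     : // Why the repair loop above compares against "Map()" instead of calling
                     : // is_null(): with V8's pointer tagging, a cleared map word is all zero
                     : // bits, which is exactly the encoding of Smi(0), so a generic Object
                     : // cannot tell the two apart. A minimal sketch of the ambiguity, assuming
                     : // the usual "Smis have a clear low bit" tagging scheme:
                     : //
                     : //   #include <cstdint>
                     : //   bool LooksLikeSmi(uintptr_t raw) { return (raw & 1) == 0; }
                     : //   // LooksLikeSmi(0) is true: a zeroed slot and Smi(0) share their bits.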
    2926             : 
    2927     6072076 : void FreeListCategory::Relink() {
    2928             :   DCHECK(!is_linked());
    2929             :   owner()->AddCategory(this);
    2930           0 : }
    2931             : 
    2932      474415 : FreeList::FreeList() : wasted_bytes_(0) {
    2933     2846490 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2934     2846490 :     categories_[i] = nullptr;
    2935             :   }
    2936      474415 :   Reset();
    2937           0 : }
    2938             : 
    2939             : 
    2940      697945 : void FreeList::Reset() {
    2941             :   ForAllFreeListCategories(
    2942             :       [](FreeListCategory* category) { category->Reset(); });
    2943     4187670 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2944     4187670 :     categories_[i] = nullptr;
    2945             :   }
    2946      697945 :   ResetStats();
    2947      697945 : }
    2948             : 
    2949    21748977 : size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
    2950             :   Page* page = Page::FromAddress(start);
    2951             :   page->DecreaseAllocatedBytes(size_in_bytes);
    2952             : 
    2953             :   // Blocks have to be a minimum size to hold free list items.
    2954    21748977 :   if (size_in_bytes < kMinBlockSize) {
    2955             :     page->add_wasted_memory(size_in_bytes);
    2956             :     wasted_bytes_ += size_in_bytes;
    2957      482899 :     return size_in_bytes;
    2958             :   }
    2959             : 
    2960             :   // Insert other blocks at the head of a free list of the appropriate
    2961             :   // magnitude.
    2962             :   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
    2963    21266078 :   page->free_list_category(type)->Free(start, size_in_bytes, mode);
    2964             :   DCHECK_EQ(page->AvailableInFreeList(),
    2965             :             page->AvailableInFreeListFromAllocatedBytes());
    2966    21263773 :   return 0;
    2967             : }
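                     : // A minimal sketch of the size-class selection Free() relies on. The
                     : // names are modeled on the kHuge constant used above, but the thresholds
                     : // below are illustrative assumptions, not V8's actual boundaries; the
                     : // point is that freed blocks are bucketed by size so allocation can
                     : // start its search in a category that is guaranteed to fit.
                     : //
                     : //   FreeListCategoryType SelectCategorySketch(size_t size_in_bytes) {
                     : //     if (size_in_bytes <= 64) return kTiny;       // assumed bound
                     : //     if (size_in_bytes <= 256) return kSmall;     // assumed bound
                     : //     if (size_in_bytes <= 2048) return kMedium;   // assumed bound
                     : //     if (size_in_bytes <= 16384) return kLarge;   // assumed bound
                     : //     return kHuge;
                     : //   }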
    2968             : 
    2969     3019948 : FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
    2970             :                                size_t* node_size) {
    2971             :   FreeListCategoryIterator it(this, type);
    2972             :   FreeSpace node;
    2973     6100863 :   while (it.HasNext()) {
    2974             :     FreeListCategory* current = it.Next();
    2975      596877 :     node = current->PickNodeFromList(minimum_size, node_size);
    2976      596878 :     if (!node.is_null()) {
    2977             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2978      535911 :       return node;
    2979             :     }
    2980             :     RemoveCategory(current);
    2981             :   }
    2982     2484038 :   return node;
    2983             : }
    2984             : 
    2985           0 : FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
    2986             :                                   size_t minimum_size, size_t* node_size) {
    2987      410471 :   if (categories_[type] == nullptr) return FreeSpace();
    2988       66583 :   FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
    2989             :   if (!node.is_null()) {
    2990             :     DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2991             :   }
    2992       66615 :   return node;
    2993             : }
    2994             : 
    2995     1260924 : FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
    2996             :                                         size_t* node_size,
    2997             :                                         size_t minimum_size) {
    2998             :   FreeListCategoryIterator it(this, type);
    2999             :   FreeSpace node;
    3000     2546366 :   while (it.HasNext()) {
    3001             :     FreeListCategory* current = it.Next();
    3002      678122 :     node = current->SearchForNodeInList(minimum_size, node_size);
    3003      678122 :     if (!node.is_null()) {
    3004             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    3005      653604 :       return node;
    3006             :     }
    3007       24518 :     if (current->is_empty()) {
    3008             :       RemoveCategory(current);
    3009             :     }
    3010             :   }
    3011      607320 :   return node;
    3012             : }
    3013             : 
    3014     1796864 : FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
    3015             :   DCHECK_GE(kMaxBlockSize, size_in_bytes);
    3016             :   FreeSpace node;
    3017             :   // First try the allocation fast path: only look at categories whose
    3018             :   // minimum element size already fits the request; this is constant time.
    3019             :   FreeListCategoryType type =
    3020             :       SelectFastAllocationFreeListCategoryType(size_in_bytes);
    3021     4816817 :   for (int i = type; i < kHuge && node.is_null(); i++) {
    3022             :     node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
    3023     3019982 :                       node_size);
    3024             :   }
    3025             : 
    3026     1796835 :   if (node.is_null()) {
    3027             :     // Next search the huge list for free list nodes. This takes linear time in
    3028             :     // the number of huge elements.
    3029     1260941 :     node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
    3030             :   }
    3031             : 
    3032     1796817 :   if (node.is_null() && type != kHuge) {
    3033             :     // We didn't find anything in the huge list. Now search the best fitting
    3034             :     // free list for a node that has at least the requested size.
    3035             :     type = SelectFreeListCategoryType(size_in_bytes);
    3036             :     node = TryFindNodeIn(type, size_in_bytes, node_size);
    3037             :   }
    3038             : 
    3039     1796849 :   if (!node.is_null()) {
    3040     1231789 :     Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
    3041             :   }
    3042             : 
    3043             :   DCHECK(IsVeryLong() || Available() == SumFreeLists());
    3044     1796849 :   return node;
    3045             : }
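                     : // Condensed view of the three tiers above (hypothetical wrapper name;
                     : // FindNodeIn, SearchForNodeInList and TryFindNodeIn are the real
                     : // methods, and the real code skips tier 3 for huge requests):
                     : //
                     : //   FreeSpace ThreeTierAllocate(size_t size, size_t* node_size) {
                     : //     // Tier 1, constant time per category: pop the head of any category
                     : //     // whose minimum element size already fits the request.
                     : //     for (int t = SelectFastAllocationFreeListCategoryType(size);
                     : //          t < kHuge; t++) {
                     : //       FreeSpace n = FindNodeIn(static_cast<FreeListCategoryType>(t),
                     : //                                size, node_size);
                     : //       if (!n.is_null()) return n;
                     : //     }
                     : //     // Tier 2, linear in the number of huge nodes.
                     : //     FreeSpace n = SearchForNodeInList(kHuge, node_size, size);
                     : //     if (!n.is_null()) return n;
                     : //     // Tier 3: one last look at the head of the exact best-fit category.
                     : //     return TryFindNodeIn(SelectFreeListCategoryType(size), size,
                     : //                          node_size);
                     : //   }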
    3046             : 
    3047      211347 : size_t FreeList::EvictFreeListItems(Page* page) {
    3048      211347 :   size_t sum = 0;
    3049     1268082 :   page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
    3050             :     DCHECK_EQ(this, category->owner());
    3051     1268082 :     sum += category->available();
    3052     1268082 :     RemoveCategory(category);
    3053             :     category->Reset();
    3054     1268082 :   });
    3055      211347 :   return sum;
    3056             : }
    3057             : 
    3058           0 : bool FreeList::ContainsPageFreeListItems(Page* page) {
    3059           0 :   bool contained = false;
    3060             :   page->ForAllFreeListCategories(
    3061           0 :       [this, &contained](FreeListCategory* category) {
    3062           0 :         if (category->owner() == this && category->is_linked()) {
    3063           0 :           contained = true;
    3064             :         }
    3065           0 :       });
    3066           0 :   return contained;
    3067             : }
    3068             : 
    3069           0 : void FreeList::RepairLists(Heap* heap) {
    3070             :   ForAllFreeListCategories(
    3071      121986 :       [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
    3072           0 : }
    3073             : 
    3074           0 : bool FreeList::AddCategory(FreeListCategory* category) {
    3075     7204784 :   FreeListCategoryType type = category->type_;
    3076             :   DCHECK_LT(type, kNumberOfCategories);
    3077     7204784 :   FreeListCategory* top = categories_[type];
    3078             : 
    3079     7204784 :   if (category->is_empty()) return false;
    3080     2472945 :   if (top == category) return false;
    3081             : 
    3082             :   // Common double-linked list insertion.
    3083     1846228 :   if (top != nullptr) {
    3084             :     top->set_prev(category);
    3085             :   }
    3086             :   category->set_next(top);
    3087     1846228 :   categories_[type] = category;
    3088           0 :   return true;
    3089             : }
    3090             : 
    3091     4951994 : void FreeList::RemoveCategory(FreeListCategory* category) {
    3092     2218514 :   FreeListCategoryType type = category->type_;
    3093             :   DCHECK_LT(type, kNumberOfCategories);
    3094     2218514 :   FreeListCategory* top = categories_[type];
    3095             : 
    3096             :   // Common double-linked list removal.
    3097     2218514 :   if (top == category) {
    3098      443958 :     categories_[type] = category->next();
    3099             :   }
    3100     2218514 :   if (category->prev() != nullptr) {
    3101             :     category->prev()->set_next(category->next());
    3102             :   }
    3103     2218514 :   if (category->next() != nullptr) {
    3104             :     category->next()->set_prev(category->prev());
    3105             :   }
    3106             :   category->set_next(nullptr);
    3107             :   category->set_prev(nullptr);
    3108           6 : }
    3109             : 
    3110           0 : void FreeList::PrintCategories(FreeListCategoryType type) {
    3111             :   FreeListCategoryIterator it(this, type);
    3112             :   PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
    3113           0 :          static_cast<void*>(categories_[type]), type);
    3114           0 :   while (it.HasNext()) {
    3115             :     FreeListCategory* current = it.Next();
    3116           0 :     PrintF("%p -> ", static_cast<void*>(current));
    3117             :   }
    3118           0 :   PrintF("null\n");
    3119           0 : }
    3120             : 
    3121             : 
    3122             : #ifdef DEBUG
    3123             : size_t FreeListCategory::SumFreeList() {
    3124             :   size_t sum = 0;
    3125             :   FreeSpace cur = top();
    3126             :   while (!cur.is_null()) {
    3127             :     // We can't use "cur->map()" here because both cur's map and the
    3128             :     // root can be null during bootstrapping.
    3129             :     DCHECK_EQ(*cur->map_slot(),
    3130             :               page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap));
    3131             :     sum += cur->relaxed_read_size();
    3132             :     cur = cur->next();
    3133             :   }
    3134             :   return sum;
    3135             : }
    3136             : 
    3137             : int FreeListCategory::FreeListLength() {
    3138             :   int length = 0;
    3139             :   FreeSpace cur = top();
    3140             :   while (!cur.is_null()) {
    3141             :     length++;
    3142             :     cur = cur->next();
    3143             :     if (length == kVeryLongFreeList) return length;
    3144             :   }
    3145             :   return length;
    3146             : }
    3147             : 
    3148             : bool FreeList::IsVeryLong() {
    3149             :   int len = 0;
    3150             :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    3151             :     FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
    3152             :     while (it.HasNext()) {
    3153             :       len += it.Next()->FreeListLength();
    3154             :       if (len >= FreeListCategory::kVeryLongFreeList) return true;
    3155             :     }
    3156             :   }
    3157             :   return false;
    3158             : }
    3159             : 
    3160             : 
    3161             : // This can take a very long time because it is linear in the number of entries
    3162             : // on the free list, so it should not be called if FreeListLength returns
    3163             : // kVeryLongFreeList.
    3164             : size_t FreeList::SumFreeLists() {
    3165             :   size_t sum = 0;
    3166             :   ForAllFreeListCategories(
    3167             :       [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
    3168             :   return sum;
    3169             : }
    3170             : #endif
    3171             : 
    3172             : 
    3173             : // -----------------------------------------------------------------------------
    3174             : // OldSpace implementation
    3175             : 
    3176      223530 : void PagedSpace::PrepareForMarkCompact() {
    3177             :   // We don't have a linear allocation area while sweeping.  It will be restored
    3178             :   // on the first allocation after the sweep.
    3179      223530 :   FreeLinearAllocationArea();
    3180             : 
    3181             :   // Clear the free list before a full GC---it will be rebuilt afterward.
    3182      223530 :   free_list_.Reset();
    3183      223530 : }
    3184             : 
    3185    10491351 : size_t PagedSpace::SizeOfObjects() {
    3186    10491351 :   CHECK_GE(limit(), top());
    3187             :   DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
    3188    20982702 :   return Size() - (limit() - top());
    3189             : }
    3190             : 
    3191         135 : bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
    3192         270 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3193         135 :   if (collector->sweeping_in_progress()) {
    3194             :     // Wait for the sweeper threads here and complete the sweeping phase.
    3195          10 :     collector->EnsureSweepingCompleted();
    3196             : 
    3197             :     // After waiting for the sweeper threads, there may be new free-list
    3198             :     // entries.
    3199          10 :     return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
    3200             :   }
    3201             :   return false;
    3202             : }
    3203             : 
    3204          30 : bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
    3205          40 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3206          35 :   if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
    3207           5 :     collector->sweeper()->ParallelSweepSpace(identity(), 0);
    3208           5 :     RefillFreeList();
    3209           5 :     return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
    3210             :   }
    3211             :   return false;
    3212             : }
    3213             : 
    3214     1062007 : bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
    3215     2124014 :   VMState<GC> state(heap()->isolate());
    3216             :   RuntimeCallTimerScope runtime_timer(
    3217     1062007 :       heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
    3218     2124014 :   return RawSlowRefillLinearAllocationArea(size_in_bytes);
    3219             : }
    3220             : 
    3221      169891 : bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
    3222      169891 :   return RawSlowRefillLinearAllocationArea(size_in_bytes);
    3223             : }
    3224             : 
    3225     1231911 : bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
    3226             :   // Allocation in this space has failed.
    3227             :   DCHECK_GE(size_in_bytes, 0);
    3228             :   const int kMaxPagesToSweep = 1;
    3229             : 
    3230     1231911 :   if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
    3231             : 
    3232     1578462 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3233             :   // Sweeping is still in progress.
    3234      505205 :   if (collector->sweeping_in_progress()) {
    3235      119273 :     if (FLAG_concurrent_sweeping && !is_local() &&
    3236       38811 :         !collector->sweeper()->AreSweeperTasksRunning()) {
    3237       26026 :       collector->EnsureSweepingCompleted();
    3238             :     }
    3239             : 
    3240             :     // First try to refill the free list; concurrent sweeper threads
    3241             :     // may have freed some objects in the meantime.
    3242       80496 :     RefillFreeList();
    3243             : 
    3244             :     // Retry the free list allocation.
    3245       80534 :     if (RefillLinearAllocationAreaFromFreeList(
    3246             :             static_cast<size_t>(size_in_bytes)))
    3247             :       return true;
    3248             : 
    3249             :     // If sweeping is still in progress try to sweep pages.
    3250             :     int max_freed = collector->sweeper()->ParallelSweepSpace(
    3251       49407 :         identity(), size_in_bytes, kMaxPagesToSweep);
    3252       49435 :     RefillFreeList();
    3253       49435 :     if (max_freed >= size_in_bytes) {
    3254       37110 :       if (RefillLinearAllocationAreaFromFreeList(
    3255             :               static_cast<size_t>(size_in_bytes)))
    3256             :         return true;
    3257             :     }
    3258      424743 :   } else if (is_local()) {
    3259             :     // Sweeping is not in progress and we are on a {CompactionSpace}. This can
    3260             :     // only happen when we are evacuating for the young generation.
    3261       25602 :     PagedSpace* main_space = heap()->paged_space(identity());
    3262       25602 :     Page* page = main_space->RemovePageSafe(size_in_bytes);
    3263       25620 :     if (page != nullptr) {
    3264       18949 :       AddPage(page);
    3265       18949 :       if (RefillLinearAllocationAreaFromFreeList(
    3266             :               static_cast<size_t>(size_in_bytes)))
    3267             :         return true;
    3268             :     }
    3269             :   }
    3270             : 
    3271      428630 :   if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
    3272             :     DCHECK((CountTotalPages() > 1) ||
    3273             :            (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    3274             :     return RefillLinearAllocationAreaFromFreeList(
    3275      428464 :         static_cast<size_t>(size_in_bytes));
    3276             :   }
    3277             : 
    3278             :   // If sweeper threads are active, wait for them at this point and steal
    3279             :   // elements from their free lists. Allocation may still fail here, which
    3280             :   // would indicate that there is not enough memory for the given allocation.
    3281         165 :   return SweepAndRetryAllocation(size_in_bytes);
    3282             : }
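                     : // The slow path above is a retry ladder, cheapest step first:
                     : //   1. reuse what is already on the free list;
                     : //   2. while sweeping is in progress, pull the concurrent sweeper's
                     : //      results into the free list and retry;
                     : //   3. sweep up to kMaxPagesToSweep pages ourselves and retry;
                     : //   4. on a compaction space, steal a suitable page from the main space
                     : //      and retry;
                     : //   5. expand the space with a fresh page and retry;
                     : //   6. as a last resort, complete sweeping and retry once more.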
    3283             : 
    3284             : // -----------------------------------------------------------------------------
    3285             : // MapSpace implementation
    3286             : 
    3287             : #ifdef VERIFY_HEAP
    3288             : void MapSpace::VerifyObject(HeapObject object) { CHECK(object->IsMap()); }
    3289             : #endif
    3290             : 
    3291       61049 : ReadOnlySpace::ReadOnlySpace(Heap* heap)
    3292             :     : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
    3293      122098 :       is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
    3294       61049 : }
    3295             : 
    3296       61492 : void ReadOnlyPage::MakeHeaderRelocatable() {
    3297       61492 :   if (mutex_ != nullptr) {
    3298             :     // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
    3299       61049 :     delete mutex_;
    3300       61049 :     mutex_ = nullptr;
    3301       61049 :     local_tracker_ = nullptr;
    3302       61049 :     reservation_.Reset();
    3303             :   }
    3304       61492 : }
    3305             : 
    3306      122969 : void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
    3307      245938 :   MemoryAllocator* memory_allocator = heap()->memory_allocator();
    3308      245938 :   for (Page* p : *this) {
    3309             :     ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
    3310      122969 :     if (access == PageAllocator::kRead) {
    3311       61492 :       page->MakeHeaderRelocatable();
    3312             :     }
    3313             : 
    3314             :     // Read-only pages don't have a valid reservation object, so we fetch
    3315             :     // the proper page allocator manually.
    3316             :     v8::PageAllocator* page_allocator =
    3317      122969 :         memory_allocator->page_allocator(page->executable());
    3318      122969 :     CHECK(
    3319             :         SetPermissions(page_allocator, page->address(), page->size(), access));
    3320             :   }
    3321      122969 : }
    3322             : 
    3323             : // Once we have booted, we have created a map that represents free space
    3324             : // on the heap.  If a free list already existed, then the elements on it
    3325             : // were created with the wrong FreeSpaceMap (normally nullptr), so we need
    3326             : // to fix them.
    3327       60993 : void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
    3328       60993 :   free_list_.RepairLists(heap());
    3329             :   // Each page may have a small free space that is not tracked by a free list.
    3330             :   // Those free spaces still contain null as their map pointer.
    3331             :   // Overwrite them with new fillers.
    3332      182979 :   for (Page* page : *this) {
    3333       60993 :     int size = static_cast<int>(page->wasted_memory());
    3334       60993 :     if (size == 0) {
    3335             :       // If there is no wasted memory then all free space is in the free list.
    3336             :       continue;
    3337             :     }
    3338           0 :     Address start = page->HighWaterMark();
    3339             :     Address end = page->area_end();
    3340           0 :     if (start < end - size) {
    3341             :       // A region at the high watermark is already in the free list.
    3342           0 :       HeapObject filler = HeapObject::FromAddress(start);
    3343           0 :       CHECK(filler->IsFiller());
    3344           0 :       start += filler->Size();
    3345             :     }
    3346           0 :     CHECK_EQ(size, static_cast<int>(end - start));
    3347           0 :     heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
    3348             :   }
    3349       60993 : }
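                     : // A worked example of the wasted-memory repair above (addresses are
                     : // assumptions): with area_end = 0x7000, wasted_memory = 0x100 and
                     : // HighWaterMark = 0x6E00, start < end - size holds (0x6E00 < 0x6F00),
                     : // so the region at the watermark is a filler that is already on the
                     : // free list; start advances past it to 0x6F00, and the new filler
                     : // covers exactly the remaining [0x6F00, 0x7000) = 0x100 bytes.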
    3350             : 
    3351         620 : void ReadOnlySpace::ClearStringPaddingIfNeeded() {
    3352        1240 :   if (is_string_padding_cleared_) return;
    3353             : 
    3354             :   WritableScope writable_scope(this);
    3355         886 :   for (Page* page : *this) {
    3356         443 :     HeapObjectIterator iterator(page);
    3357     1005714 :     for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
    3358      502414 :       if (o->IsSeqOneByteString()) {
    3359      223715 :         SeqOneByteString::cast(o)->clear_padding();
    3360      278699 :       } else if (o->IsSeqTwoByteString()) {
    3361           0 :         SeqTwoByteString::cast(o)->clear_padding();
    3362             :       }
    3363             :     }
    3364             :   }
    3365         443 :   is_string_padding_cleared_ = true;
    3366             : }
    3367             : 
    3368       61492 : void ReadOnlySpace::MarkAsReadOnly() {
    3369             :   DCHECK(!is_marked_read_only_);
    3370       61492 :   FreeLinearAllocationArea();
    3371       61492 :   is_marked_read_only_ = true;
    3372       61492 :   SetPermissionsForPages(PageAllocator::kRead);
    3373       61492 : }
    3374             : 
    3375           0 : void ReadOnlySpace::MarkAsReadWrite() {
    3376             :   DCHECK(is_marked_read_only_);
    3377       61477 :   SetPermissionsForPages(PageAllocator::kReadWrite);
    3378       61477 :   is_marked_read_only_ = false;
    3379           0 : }
    3380             : 
    3381       81152 : Address LargePage::GetAddressToShrink(Address object_address,
    3382             :                                       size_t object_size) {
    3383       81152 :   if (executable() == EXECUTABLE) {
    3384             :     return 0;
    3385             :   }
    3386       19672 :   size_t used_size = ::RoundUp((object_address - address()) + object_size,
    3387        9836 :                                MemoryAllocator::GetCommitPageSize());
    3388        9836 :   if (used_size < CommittedPhysicalMemory()) {
    3389          53 :     return address() + used_size;
    3390             :   }
    3391             :   return 0;
    3392             : }
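                     : // Worked example for the shrink computation above (numbers assumed):
                     : // with a 4 KiB commit page size, an object that ends 9000 bytes into
                     : // the page gives used_size = RoundUp(9000, 4096) = 12288, so if 16384
                     : // bytes are committed, the tail from address() + 12288 onward can be
                     : // released. For a power-of-two granularity g, RoundUp is simply:
                     : //
                     : //   size_t RoundUpPow2(size_t x, size_t g) { return (x + g - 1) & ~(g - 1); }
                     : //   // RoundUpPow2(9000, 4096) == 12288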
    3393             : 
    3394          53 : void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
    3395             :   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
    3396         159 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    3397             :   RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
    3398          53 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    3399          53 :   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
    3400          53 :   RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
    3401          53 : }
    3402             : 
    3403             : // -----------------------------------------------------------------------------
    3404             : // LargeObjectIterator
    3405             : 
    3406       22755 : LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
    3407       22755 :   current_ = space->first_page();
    3408           0 : }
    3409             : 
    3410       26585 : HeapObject LargeObjectIterator::Next() {
    3411      256757 :   if (current_ == nullptr) return HeapObject();
    3412             : 
    3413             :   HeapObject object = current_->GetObject();
    3414        3830 :   current_ = current_->next_page();
    3415        3830 :   return object;
    3416             : }
    3417             : 
    3418             : // -----------------------------------------------------------------------------
    3419             : // LargeObjectSpace
    3420             : 
    3421       61049 : LargeObjectSpace::LargeObjectSpace(Heap* heap)
    3422       61049 :     : LargeObjectSpace(heap, LO_SPACE) {}
    3423             : 
    3424           0 : LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    3425      183147 :     : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}
    3426             : 
    3427      183102 : void LargeObjectSpace::TearDown() {
    3428      422179 :   while (!memory_chunk_list_.Empty()) {
    3429             :     LargePage* page = first_page();
    3430      167925 :     LOG(heap()->isolate(),
    3431             :         DeleteEvent("LargeObjectChunk",
    3432             :                     reinterpret_cast<void*>(page->address())));
    3433       55975 :     memory_chunk_list_.Remove(page);
    3434       55975 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
    3435             :   }
    3436      183102 : }
    3437             : 
    3438       17275 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
    3439       17275 :   return AllocateRaw(object_size, NOT_EXECUTABLE);
    3440             : }
    3441             : 
    3442       61246 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
    3443             :                                                Executability executable) {
    3444             :   // Check if we want to force a GC before growing the old space further.
    3445             :   // If so, fail the allocation.
    3446      427648 :   if (!heap()->CanExpandOldGeneration(object_size) ||
    3447       61162 :       !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    3448             :     return AllocationResult::Retry(identity());
    3449             :   }
    3450             : 
    3451       61048 :   LargePage* page = AllocateLargePage(object_size, executable);
    3452       61048 :   if (page == nullptr) return AllocationResult::Retry(identity());
    3453             :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3454             :   HeapObject object = page->GetObject();
    3455             :   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    3456             :       heap()->GCFlagsForIncrementalMarking(),
    3457       61048 :       kGCCallbackScheduleIdleGarbageCollection);
    3458       61048 :   if (heap()->incremental_marking()->black_allocation()) {
    3459             :     heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
    3460             :   }
    3461             :   DCHECK_IMPLIES(
    3462             :       heap()->incremental_marking()->black_allocation(),
    3463             :       heap()->incremental_marking()->marking_state()->IsBlack(object));
    3464             :   page->InitializationMemoryFence();
    3465       61048 :   heap()->NotifyOldGenerationExpansion();
    3466       61048 :   AllocationStep(object_size, object->address(), object_size);
    3467       61048 :   return object;
    3468             : }
    3469             : 
    3470       61048 : LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
    3471             :                                                Executability executable) {
    3472             :   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
    3473      183144 :       object_size, this, executable);
    3474       61048 :   if (page == nullptr) return nullptr;
    3475             :   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
    3476             : 
    3477       61048 :   AddPage(page, object_size);
    3478             : 
    3479             :   HeapObject object = page->GetObject();
    3480             : 
    3481             :   heap()->CreateFillerObjectAt(object->address(), object_size,
    3482       61048 :                                ClearRecordedSlots::kNo);
    3483       61048 :   return page;
    3484             : }
    3485             : 
    3486             : 
    3487         753 : size_t LargeObjectSpace::CommittedPhysicalMemory() {
    3488             :   // On a platform that provides lazy committing of memory, we over-account
    3489             :   // the actually committed memory. There is no easy way right now to support
    3490             :   // precise accounting of committed memory in large object space.
    3491         753 :   return CommittedMemory();
    3492             : }
    3493             : 
    3494      529740 : LargePage* CodeLargeObjectSpace::FindPage(Address a) {
    3495      529740 :   const Address key = MemoryChunk::FromAddress(a)->address();
    3496             :   auto it = chunk_map_.find(key);
    3497      529740 :   if (it != chunk_map_.end()) {
    3498          10 :     LargePage* page = it->second;
    3499          10 :     CHECK(page->Contains(a));
    3500             :     return page;
    3501             :   }
    3502             :   return nullptr;
    3503             : }
    3504             : 
    3505      149020 : void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
    3506             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    3507             :       heap()->incremental_marking()->non_atomic_marking_state();
    3508             :   LargeObjectIterator it(this);
    3509      379192 :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    3510       81152 :     if (marking_state->IsBlackOrGrey(obj)) {
    3511             :       Marking::MarkWhite(marking_state->MarkBitFrom(obj));
    3512             :       MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    3513       81152 :       RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
    3514       81152 :       chunk->ResetProgressBar();
    3515             :       marking_state->SetLiveBytes(chunk, 0);
    3516             :     }
    3517             :     DCHECK(marking_state->IsWhite(obj));
    3518             :   }
    3519      149020 : }
    3520             : 
    3521       43967 : void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
    3522      176058 :   for (Address current = reinterpret_cast<Address>(page);
    3523       88029 :        current < reinterpret_cast<Address>(page) + page->size();
    3524             :        current += MemoryChunk::kPageSize) {
    3525       44062 :     chunk_map_[current] = page;
    3526             :   }
    3527       43967 : }
    3528             : 
    3529          15 : void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
    3530         225 :   for (Address current = page->address();
    3531         105 :        current < reinterpret_cast<Address>(page) + page->size();
    3532             :        current += MemoryChunk::kPageSize) {
    3533             :     chunk_map_.erase(current);
    3534             :   }
    3535          15 : }
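                     : // Sketch of the chunk-map bookkeeping above: a large page that spans
                     : // several MemoryChunk::kPageSize-aligned chunks registers one entry per
                     : // chunk base, so FindPage resolves any interior address with a single
                     : // hash lookup after masking off the low bits. A self-contained sketch
                     : // (hypothetical names; assumes the chunk size is a power of two):
                     : //
                     : //   #include <cstdint>
                     : //   #include <unordered_map>
                     : //   struct PageStub;  // stand-in for LargePage
                     : //   std::unordered_map<uintptr_t, PageStub*> chunk_map;
                     : //
                     : //   PageStub* FindPageSketch(uintptr_t addr, uintptr_t chunk_size) {
                     : //     uintptr_t key = addr & ~(chunk_size - 1);  // containing chunk base
                     : //     auto it = chunk_map.find(key);
                     : //     return it == chunk_map.end() ? nullptr : it->second;
                     : //   }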
    3536             : 
    3537           0 : void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
    3538             :   DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
    3539             :   DCHECK(page->IsLargePage());
    3540             :   DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
    3541             :   DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
    3542           0 :   size_t object_size = static_cast<size_t>(page->GetObject()->Size());
    3543           0 :   static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
    3544           0 :   AddPage(page, object_size);
    3545             :   page->ClearFlag(MemoryChunk::FROM_PAGE);
    3546           0 :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3547           0 :   page->set_owner(this);
    3548           0 : }
    3549             : 
    3550       61048 : void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
    3551       61048 :   size_ += static_cast<int>(page->size());
    3552             :   AccountCommitted(page->size());
    3553       61048 :   objects_size_ += object_size;
    3554       61048 :   page_count_++;
    3555             :   memory_chunk_list_.PushBack(page);
    3556       61048 : }
    3557             : 
    3558        5073 : void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
    3559        5073 :   size_ -= static_cast<int>(page->size());
    3560             :   AccountUncommitted(page->size());
    3561        5073 :   objects_size_ -= object_size;
    3562        5073 :   page_count_--;
    3563        5073 :   memory_chunk_list_.Remove(page);
    3564        5073 : }
    3565             : 
    3566      223530 : void LargeObjectSpace::FreeUnmarkedObjects() {
    3567             :   LargePage* current = first_page();
    3568             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    3569        5126 :       heap()->incremental_marking()->non_atomic_marking_state();
    3570             :   // Right-trimming does not update the objects_size_ counter, so we lazily
    3571             :   // recompute it after every GC.
    3572             :   size_t surviving_object_size = 0;
    3573      533285 :   while (current) {
    3574             :     LargePage* next_current = current->next_page();
    3575       86225 :     HeapObject object = current->GetObject();
    3576             :     DCHECK(!marking_state->IsGrey(object));
    3577       86225 :     size_t size = static_cast<size_t>(object->Size());
    3578       86225 :     if (marking_state->IsBlack(object)) {
    3579             :       Address free_start;
    3580       81152 :       surviving_object_size += size;
    3581       81152 :       if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
    3582             :           0) {
    3583             :         DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
    3584          53 :         current->ClearOutOfLiveRangeSlots(free_start);
    3585             :         const size_t bytes_to_free =
    3586         106 :             current->size() - (free_start - current->address());
    3587             :         heap()->memory_allocator()->PartialFreeMemory(
    3588             :             current, free_start, bytes_to_free,
    3589         106 :             current->area_start() + object->Size());
    3590          53 :         size_ -= bytes_to_free;
    3591             :         AccountUncommitted(bytes_to_free);
    3592             :       }
    3593             :     } else {
    3594        5073 :       RemovePage(current, size);
    3595             :       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
    3596        5073 :           current);
    3597             :     }
    3598             :     current = next_current;
    3599             :   }
    3600      223530 :   objects_size_ = surviving_object_size;
    3601      223530 : }
    3602             : 
    3603          85 : bool LargeObjectSpace::Contains(HeapObject object) {
    3604             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    3605             : 
    3606          85 :   bool owned = (chunk->owner() == this);
    3607             : 
    3608             :   SLOW_DCHECK(!owned || ContainsSlow(object->address()));
    3609             : 
    3610          85 :   return owned;
    3611             : }
    3612             : 
    3613           0 : bool LargeObjectSpace::ContainsSlow(Address addr) {
    3614           0 :   for (LargePage* page : *this) {
    3615           0 :     if (page->Contains(addr)) return true;
    3616             :   }
    3617             :   return false;
    3618             : }
    3619             : 
    3620       22755 : std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
    3621       22755 :   return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
    3622             : }
    3623             : 
    3624             : #ifdef VERIFY_HEAP
    3625             : // We do not assume that the large object iterator works, because it depends
    3626             : // on the invariants we are checking during verification.
    3627             : void LargeObjectSpace::Verify(Isolate* isolate) {
    3628             :   size_t external_backing_store_bytes[kNumTypes];
    3629             : 
    3630             :   for (int i = 0; i < kNumTypes; i++) {
    3631             :     external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    3632             :   }
    3633             : 
    3634             :   for (LargePage* chunk = first_page(); chunk != nullptr;
    3635             :        chunk = chunk->next_page()) {
    3636             :     // Each chunk contains an object that starts at the large object page's
    3637             :     // object area start.
    3638             :     HeapObject object = chunk->GetObject();
    3639             :     Page* page = Page::FromHeapObject(object);
    3640             :     CHECK(object->address() == page->area_start());
    3641             : 
    3642             :     // The first word should be a map, and we expect all map pointers to be
    3643             :     // in map space or read-only space.
    3644             :     Map map = object->map();
    3645             :     CHECK(map->IsMap());
    3646             :     CHECK(heap()->map_space()->Contains(map) ||
    3647             :           heap()->read_only_space()->Contains(map));
    3648             : 
    3649             :     // We have only the following types in the large object space:
    3650             :     if (!(object->IsAbstractCode() || object->IsSeqString() ||
    3651             :           object->IsExternalString() || object->IsThinString() ||
    3652             :           object->IsFixedArray() || object->IsFixedDoubleArray() ||
    3653             :           object->IsWeakFixedArray() || object->IsWeakArrayList() ||
    3654             :           object->IsPropertyArray() || object->IsByteArray() ||
    3655             :           object->IsFeedbackVector() || object->IsBigInt() ||
    3656             :           object->IsFreeSpace() || object->IsFeedbackMetadata() ||
    3657             :           object->IsContext() ||
    3658             :           object->IsUncompiledDataWithoutPreparseData() ||
    3659             :           object->IsPreparseData()) &&
    3660             :         !FLAG_young_generation_large_objects) {
    3661             :       FATAL("Found invalid Object (instance_type=%i) in large object space.",
    3662             :             object->map()->instance_type());
    3663             :     }
    3664             : 
    3665             :     // The object itself should look OK.
    3666             :     object->ObjectVerify(isolate);
    3667             : 
    3668             :     if (!FLAG_verify_heap_skip_remembered_set) {
    3669             :       heap()->VerifyRememberedSetFor(object);
    3670             :     }
    3671             : 
    3672             :     // Byte arrays and strings don't have interior pointers.
    3673             :     if (object->IsAbstractCode()) {
    3674             :       VerifyPointersVisitor code_visitor(heap());
    3675             :       object->IterateBody(map, object->Size(), &code_visitor);
    3676             :     } else if (object->IsFixedArray()) {
    3677             :       FixedArray array = FixedArray::cast(object);
    3678             :       for (int j = 0; j < array->length(); j++) {
    3679             :         Object element = array->get(j);
    3680             :         if (element->IsHeapObject()) {
    3681             :           HeapObject element_object = HeapObject::cast(element);
    3682             :           CHECK(heap()->Contains(element_object));
    3683             :           CHECK(element_object->map()->IsMap());
    3684             :         }
    3685             :       }
    3686             :     } else if (object->IsPropertyArray()) {
    3687             :       PropertyArray array = PropertyArray::cast(object);
    3688             :       for (int j = 0; j < array->length(); j++) {
    3689             :         Object property = array->get(j);
    3690             :         if (property->IsHeapObject()) {
    3691             :           HeapObject property_object = HeapObject::cast(property);
    3692             :           CHECK(heap()->Contains(property_object));
    3693             :           CHECK(property_object->map()->IsMap());
    3694             :         }
    3695             :       }
    3696             :     }
    3697             :     for (int i = 0; i < kNumTypes; i++) {
    3698             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    3699             :       external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    3700             :     }
    3701             :   }
    3702             :   for (int i = 0; i < kNumTypes; i++) {
    3703             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    3704             :     CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
    3705             :   }
    3706             : }
    3707             : #endif
    3708             : 
    3709             : #ifdef DEBUG
    3710             : void LargeObjectSpace::Print() {
    3711             :   StdoutStream os;
    3712             :   LargeObjectIterator it(this);
    3713             :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    3714             :     obj->Print(os);
    3715             :   }
    3716             : }
    3717             : 
    3718             : void Page::Print() {
    3719             :   // Make a best effort to print the objects in the page.
    3720             :   PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
    3721             :          this->owner()->name());
    3722             :   printf(" --------------------------------------\n");
    3723             :   HeapObjectIterator objects(this);
    3724             :   unsigned mark_size = 0;
    3725             :   for (HeapObject object = objects.Next(); !object.is_null();
    3726             :        object = objects.Next()) {
    3727             :     bool is_marked =
    3728             :         heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
    3729             :     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    3730             :     if (is_marked) {
    3731             :       mark_size += object->Size();
    3732             :     }
    3733             :     object->ShortPrint();
    3734             :     PrintF("\n");
    3735             :   }
    3736             :   printf(" --------------------------------------\n");
    3737             :   printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
    3738             :          heap()->incremental_marking()->marking_state()->live_bytes(this));
    3739             : }
    3740             : 
    3741             : #endif  // DEBUG
    3742             : 
    3743       61049 : NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
    3744             :     : LargeObjectSpace(heap, NEW_LO_SPACE),
    3745             :       pending_object_(0),
    3746       61049 :       capacity_(capacity) {}
    3747             : 
    3748           0 : AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
    3749             :   // Do not allocate more objects if promoting the existing objects would
    3750             :   // exceed the old generation capacity.
    3751           0 :   if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
    3752             :     return AllocationResult::Retry(identity());
    3753             :   }
    3754             : 
    3755             :   // Allocation for the first object must succeed independently of the capacity.
    3756           0 :   if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
    3757             :     return AllocationResult::Retry(identity());
    3758             :   }
    3759             : 
    3760           0 :   LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
    3761           0 :   if (page == nullptr) return AllocationResult::Retry(identity());
    3762             : 
    3763             :   // The size of the first object may exceed the capacity.
    3764           0 :   capacity_ = Max(capacity_, SizeOfObjects());
    3765             : 
    3766             :   HeapObject result = page->GetObject();
    3767             :   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3768             :   page->SetFlag(MemoryChunk::TO_PAGE);
    3769             :   pending_object_.store(result->address(), std::memory_order_relaxed);
    3770             : #ifdef ENABLE_MINOR_MC
    3771           0 :   if (FLAG_minor_mc) {
    3772             :     page->AllocateYoungGenerationBitmap();
    3773             :     heap()
    3774             :         ->minor_mark_compact_collector()
    3775             :         ->non_atomic_marking_state()
    3776           0 :         ->ClearLiveness(page);
    3777             :   }
    3778             : #endif  // ENABLE_MINOR_MC
    3779             :   page->InitializationMemoryFence();
    3780             :   DCHECK(page->IsLargePage());
    3781             :   DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
    3782           0 :   AllocationStep(object_size, result->address(), object_size);
    3783           0 :   return result;
    3784             : }
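                     : // Sketch of the capacity rule above (hypothetical helper; Available()
                     : // is capacity_ - SizeOfObjects(), as defined just below):
                     : //
                     : //   bool FitsNewLoSpace(size_t used, size_t capacity, size_t object) {
                     : //     if (used == 0) return true;        // the first object always fits
                     : //     return object <= capacity - used;  // i.e. object <= Available()
                     : //   }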
    3785             : 
    3786         311 : size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
    3787             : 
    3788       98000 : void NewLargeObjectSpace::Flip() {
    3789      196000 :   for (LargePage* chunk = first_page(); chunk != nullptr;
    3790             :        chunk = chunk->next_page()) {
    3791             :     chunk->SetFlag(MemoryChunk::FROM_PAGE);
    3792             :     chunk->ClearFlag(MemoryChunk::TO_PAGE);
    3793             :   }
    3794       98000 : }
    3795             : 
    3796       23490 : void NewLargeObjectSpace::FreeDeadObjects(
    3797             :     const std::function<bool(HeapObject)>& is_dead) {
    3798       23490 :   bool is_marking = heap()->incremental_marking()->IsMarking();
    3799             :   size_t surviving_object_size = 0;
    3800       23490 :   for (auto it = begin(); it != end();) {
    3801             :     LargePage* page = *it;
    3802             :     it++;
    3803           0 :     HeapObject object = page->GetObject();
    3804           0 :     size_t size = static_cast<size_t>(object->Size());
    3805           0 :     if (is_dead(object)) {
    3806           0 :       RemovePage(page, size);
    3807           0 :       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    3808           0 :       if (FLAG_concurrent_marking && is_marking) {
    3809           0 :         heap()->concurrent_marking()->ClearMemoryChunkData(page);
    3810             :       }
    3811             :     } else {
    3812           0 :       surviving_object_size += size;
    3813             :     }
    3814             :   }
    3815             :   // Right-trimming does not update the objects_size_ counter, so we lazily
    3816             :   // recompute it after every GC.
    3817       23490 :   objects_size_ = surviving_object_size;
    3818       23490 : }
    3819             : 
    3820      122449 : void NewLargeObjectSpace::SetCapacity(size_t capacity) {
    3821      244898 :   capacity_ = Max(capacity, SizeOfObjects());
    3822      122449 : }
    3823             : 
    3824       61049 : CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    3825             :     : LargeObjectSpace(heap, CODE_LO_SPACE),
    3826      122098 :       chunk_map_(kInitialChunkMapCapacity) {}
    3827             : 
    3828       43971 : AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
    3829       43971 :   return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
    3830             : }
    3831             : 
    3832       43967 : void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
    3833       43967 :   LargeObjectSpace::AddPage(page, object_size);
    3834       43967 :   InsertChunkMapEntries(page);
    3835       43967 : }
    3836             : 
    3837          15 : void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
    3838          15 :   RemoveChunkMapEntries(page);
    3839          15 :   LargeObjectSpace::RemovePage(page, object_size);
    3840          15 : }
    3841             : 
    3842             : }  // namespace internal
    3843      178779 : }  // namespace v8

Generated by: LCOV version 1.10