LCOV - code coverage report
Current view: top level - src/heap - spaces.cc (source / functions)
Test:         app.info
Date:         2019-01-20
                           Hit    Total    Coverage
Lines:                    1224     1390      88.1 %
Functions:                 207      247      83.8 %

          Line data    Source code
       1             : // Copyright 2011 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/spaces.h"
       6             : 
       7             : #include <utility>
       8             : 
       9             : #include "src/base/bits.h"
      10             : #include "src/base/macros.h"
      11             : #include "src/base/platform/semaphore.h"
      12             : #include "src/base/template-utils.h"
      13             : #include "src/counters.h"
      14             : #include "src/heap/array-buffer-tracker.h"
      15             : #include "src/heap/concurrent-marking.h"
      16             : #include "src/heap/gc-tracer.h"
      17             : #include "src/heap/heap-controller.h"
      18             : #include "src/heap/incremental-marking-inl.h"
      19             : #include "src/heap/mark-compact.h"
      20             : #include "src/heap/remembered-set.h"
      21             : #include "src/heap/slot-set.h"
      22             : #include "src/heap/sweeper.h"
      23             : #include "src/msan.h"
      24             : #include "src/objects-inl.h"
      25             : #include "src/objects/free-space-inl.h"
      26             : #include "src/objects/js-array-buffer-inl.h"
      27             : #include "src/objects/js-array-inl.h"
      28             : #include "src/snapshot/snapshot.h"
      29             : #include "src/v8.h"
      30             : #include "src/vm-state-inl.h"
      31             : 
      32             : namespace v8 {
      33             : namespace internal {
      34             : 
      35             : // These checks ensure that the lower 32 bits of any real heap object
      36             : // cannot overlap with the lower 32 bits of the cleared weak reference
      37             : // value, so it is enough to compare only the lower 32 bits of a
      38             : // MaybeObject to determine whether it is a cleared weak reference.
      39             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
      40             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
      41             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
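                     : 
                     : // Editorial sketch (not part of spaces.cc): the check that the asserts
                     : // above make possible. Assuming kClearedWeakHeapObjectLower32 as
                     : // declared in the V8 headers, a cleared weak reference is recognizable
                     : // from the low 32 bits alone:
                     : //
                     : //   bool LooksLikeClearedWeakReference(uintptr_t raw_maybe_object) {
                     : //     // Safe because no real heap object's address can alias these
                     : //     // bits (see the STATIC_ASSERTs above).
                     : //     return static_cast<uint32_t>(raw_maybe_object) ==
                     : //            kClearedWeakHeapObjectLower32;
                     : //   }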
      42             : 
      43             : // ----------------------------------------------------------------------------
      44             : // HeapObjectIterator
      45             : 
      46           6 : HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
      47             :     : cur_addr_(kNullAddress),
      48             :       cur_end_(kNullAddress),
      49             :       space_(space),
      50             :       page_range_(space->first_page(), nullptr),
      51       31424 :       current_page_(page_range_.begin()) {}
      52             : 
      53         441 : HeapObjectIterator::HeapObjectIterator(Page* page)
      54             :     : cur_addr_(kNullAddress),
      55             :       cur_end_(kNullAddress),
      56             :       space_(reinterpret_cast<PagedSpace*>(page->owner())),
      57             :       page_range_(page),
      58         882 :       current_page_(page_range_.begin()) {
      59             : #ifdef DEBUG
      60             :   Space* owner = page->owner();
      61             :   DCHECK(owner == page->heap()->old_space() ||
      62             :          owner == page->heap()->map_space() ||
      63             :          owner == page->heap()->code_space() ||
      64             :          owner == page->heap()->read_only_space());
      65             : #endif  // DEBUG
      66         441 : }
      67             : 
      68             : // We have hit the end of the current page's object area and should
      69             : // advance to the next page in the iteration range.
      70       84762 : bool HeapObjectIterator::AdvanceToNextPage() {
      71             :   DCHECK_EQ(cur_addr_, cur_end_);
      72      169524 :   if (current_page_ == page_range_.end()) return false;
      73             :   Page* cur_page = *(current_page_++);
      74       52905 :   Heap* heap = space_->heap();
      75             : 
      76       52905 :   heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
      77             : #ifdef ENABLE_MINOR_MC
      78      158715 :   if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
      79             :     heap->minor_mark_compact_collector()->MakeIterable(
      80             :         cur_page, MarkingTreatmentMode::CLEAR,
      81           0 :         FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
      82             : #else
      83             :   DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
      84             : #endif  // ENABLE_MINOR_MC
      85       52905 :   cur_addr_ = cur_page->area_start();
      86       52905 :   cur_end_ = cur_page->area_end();
      87             :   DCHECK(cur_page->SweepingDone());
      88       52905 :   return true;
      89             : }
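                     : 
                     : // Editorial sketch (the real Next() lives in heap/spaces-inl.h and may
                     : // differ in detail): how AdvanceToNextPage() is driven by the iterator:
                     : //
                     : //   HeapObject HeapObjectIterator::Next() {
                     : //     while (true) {
                     : //       if (cur_addr_ == cur_end_ && !AdvanceToNextPage()) {
                     : //         return HeapObject();  // No pages left; iteration is done.
                     : //       }
                     : //       HeapObject obj = HeapObject::FromAddress(cur_addr_);
                     : //       cur_addr_ += obj->Size();
                     : //       if (!obj->IsFiller()) return obj;  // Skip free-space fillers.
                     : //     }
                     : //   }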
      90             : 
      91      108138 : PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
      92      108138 :     : heap_(heap) {
      93             :   DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
      94             : 
      95     1081380 :   for (SpaceIterator it(heap_); it.has_next();) {
      96      865104 :     it.next()->PauseAllocationObservers();
      97      108138 :   }
      98      108138 : }
      99             : 
     100      108138 : PauseAllocationObserversScope::~PauseAllocationObserversScope() {
     101     1081380 :   for (SpaceIterator it(heap_); it.has_next();) {
     102      865104 :     it.next()->ResumeAllocationObservers();
     103      108138 :   }
     104      108138 : }
     105             : 
     106             : static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
     107             :     LAZY_INSTANCE_INITIALIZER;
     108             : 
     109       63904 : Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
     110       63904 :   base::MutexGuard guard(&mutex_);
     111             :   auto it = recently_freed_.find(code_range_size);
     112       66176 :   if (it == recently_freed_.end() || it->second.empty()) {
     113       61761 :     return reinterpret_cast<Address>(GetRandomMmapAddr());
     114             :   }
     115        2143 :   Address result = it->second.back();
     116             :   it->second.pop_back();
     117        2143 :   return result;
     118             : }
     119             : 
     120       63886 : void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
     121             :                                                 size_t code_range_size) {
     122       63886 :   base::MutexGuard guard(&mutex_);
     123       63886 :   recently_freed_[code_range_size].push_back(code_range_start);
     124       63886 : }
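                     : 
                     : // Editorial usage sketch: GetAddressHint() and NotifyFreedCodeRange()
                     : // form a size-keyed cache, so a freed code range is preferentially
                     : // handed back to the next reservation of the same size:
                     : //
                     : //   Address hint =
                     : //       code_range_address_hint.Pointer()->GetAddressHint(size);
                     : //   // ... reserve at |hint| and use the range; on teardown:
                     : //   code_range_address_hint.Pointer()->NotifyFreedCodeRange(start, size);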
     125             : 
     126             : // -----------------------------------------------------------------------------
     127             : // MemoryAllocator
     128             : //
     129             : 
     130       63898 : MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
     131             :                                  size_t code_range_size)
     132             :     : isolate_(isolate),
     133       63898 :       data_page_allocator_(isolate->page_allocator()),
     134             :       code_page_allocator_(nullptr),
     135             :       capacity_(RoundUp(capacity, Page::kPageSize)),
     136             :       size_(0),
     137             :       size_executable_(0),
     138             :       lowest_ever_allocated_(static_cast<Address>(-1ll)),
     139             :       highest_ever_allocated_(kNullAddress),
     140      255592 :       unmapper_(isolate->heap(), this) {
     141       63898 :   InitializeCodePageAllocator(data_page_allocator_, code_range_size);
     142       63898 : }
     143             : 
     144       63898 : void MemoryAllocator::InitializeCodePageAllocator(
     145             :     v8::PageAllocator* page_allocator, size_t requested) {
     146             :   DCHECK_NULL(code_page_allocator_instance_.get());
     147             : 
     148       63898 :   code_page_allocator_ = page_allocator;
     149             : 
     150       63898 :   if (requested == 0) {
     151       63898 :     if (!kRequiresCodeRange) return;
     152             :     // When a target requires the code range feature, we put all code objects
     153             :     // in a kMaximalCodeRangeSize range of virtual address space, so that
     154             :     // they can call each other with near calls.
     155             :     requested = kMaximalCodeRangeSize;
     156           1 :   } else if (requested <= kMinimumCodeRangeSize) {
     157             :     requested = kMinimumCodeRangeSize;
     158             :   }
     159             : 
     160             :   const size_t reserved_area =
     161             :       kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
     162             :   if (requested < (kMaximalCodeRangeSize - reserved_area)) {
     163             :     requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
     164             :     // Fulfilling both the reserved pages requirement and huge code area
     165             :     // alignment is not supported (it would require re-implementation).
     166             :     DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
     167             :   }
     168             :   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
     169             : 
     170             :   Address hint =
     171             :       RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
     172      127796 :                 page_allocator->AllocatePageSize());
     173             :   VirtualMemory reservation(
     174             :       page_allocator, requested, reinterpret_cast<void*>(hint),
     175      127796 :       Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
     176       63898 :   if (!reservation.IsReserved()) {
     177             :     V8::FatalProcessOutOfMemory(isolate_,
     178       63898 :                                 "CodeRange setup: allocate virtual memory");
     179             :   }
     180       63898 :   code_range_ = reservation.region();
     181             : 
     182             :   // We are sure that we have mapped a block of the requested size.
     183             :   DCHECK_GE(reservation.size(), requested);
     184             :   Address base = reservation.address();
     185             : 
     186             :   // On some platforms, specifically Win64, we need to reserve some pages at
     187             :   // the beginning of an executable space. See
     188             :   //   https://cs.chromium.org/chromium/src/components/crash/content/
     189             :   //     app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
     190             :   // for details.
     191             :   if (reserved_area > 0) {
     192             :     if (!reservation.SetPermissions(base, reserved_area,
     193             :                                     PageAllocator::kReadWrite))
     194             :       V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
     195             : 
     196             :     base += reserved_area;
     197             :   }
     198       63898 :   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
     199             :   size_t size =
     200       63898 :       RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
     201       63898 :                 MemoryChunk::kPageSize);
     202             :   DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
     203             : 
     204      127796 :   LOG(isolate_,
     205             :       NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
     206             :                requested));
     207             : 
     208       63898 :   heap_reservation_.TakeControl(&reservation);
     209      127796 :   code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
     210             :       page_allocator, aligned_base, size,
     211             :       static_cast<size_t>(MemoryChunk::kAlignment));
     212       63898 :   code_page_allocator_ = code_page_allocator_instance_.get();
     213             : }
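                     : 
                     : // Editorial note: from here on, executable pages are carved out of the
                     : // single [aligned_base, aligned_base + size) region by the
                     : // BoundedPageAllocator created above, which keeps code objects within
                     : // near-call distance of one another.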
     214             : 
     215       63883 : void MemoryAllocator::TearDown() {
     216       63883 :   unmapper()->TearDown();
     217             : 
     218             :   // Check that spaces were torn down before MemoryAllocator.
     219             :   DCHECK_EQ(size_, 0u);
     220             :   // TODO(gc) this will be true again when we fix FreeMemory.
     221             :   // DCHECK_EQ(0, size_executable_);
     222       63883 :   capacity_ = 0;
     223             : 
     224       63883 :   if (last_chunk_.IsReserved()) {
     225           0 :     last_chunk_.Free();
     226             :   }
     227             : 
     228       63883 :   if (code_page_allocator_instance_.get()) {
     229             :     DCHECK(!code_range_.is_empty());
     230             :     code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
     231      127766 :                                                             code_range_.size());
     232       63883 :     code_range_ = base::AddressRegion();
     233             :     code_page_allocator_instance_.reset();
     234             :   }
     235       63883 :   code_page_allocator_ = nullptr;
     236       63883 :   data_page_allocator_ = nullptr;
     237       63883 : }
     238             : 
     239      381085 : class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
     240             :  public:
     241             :   explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
     242             :       : CancelableTask(isolate),
     243             :         unmapper_(unmapper),
     244      190653 :         tracer_(isolate->heap()->tracer()) {}
     245             : 
     246             :  private:
     247      178797 :   void RunInternal() override {
     248      715285 :     TRACE_BACKGROUND_GC(tracer_,
     249             :                         GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
     250      178803 :     unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     251      178856 :     unmapper_->active_unmapping_tasks_--;
     252      178856 :     unmapper_->pending_unmapping_tasks_semaphore_.Signal();
     253      178884 :     if (FLAG_trace_unmapper) {
     254           0 :       PrintIsolate(unmapper_->heap_->isolate(),
     255           0 :                    "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
     256      178861 :     }
     257      178919 :   }
     258             : 
     259             :   Unmapper* const unmapper_;
     260             :   GCTracer* const tracer_;
     261             :   DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
     262             : };
     263             : 
     264      270696 : void MemoryAllocator::Unmapper::FreeQueuedChunks() {
     265      270696 :   if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
     266      190659 :     if (!MakeRoomForNewTasks()) {
     267             :       // kMaxUnmapperTasks are already running. Avoid creating any more.
     268           6 :       if (FLAG_trace_unmapper) {
     269           0 :         PrintIsolate(heap_->isolate(),
     270             :                      "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
     271           0 :                      kMaxUnmapperTasks);
     272             :       }
     273      270701 :       return;
     274             :     }
     275      381306 :     auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
     276      190653 :     if (FLAG_trace_unmapper) {
     277           0 :       PrintIsolate(heap_->isolate(),
     278             :                    "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
     279           0 :                    task->id());
     280             :     }
     281             :     DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
     282             :     DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
     283             :     DCHECK_GE(active_unmapping_tasks_, 0);
     284             :     active_unmapping_tasks_++;
     285      381306 :     task_ids_[pending_unmapping_tasks_++] = task->id();
     286      571959 :     V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
     287             :   } else {
     288       80037 :     PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     289             :   }
     290             : }
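                     : 
                     : // Editorial sketch of the task lifecycle implied above, using the names
                     : // from this file: a finished UnmapFreeMemoryTask decrements
                     : // active_unmapping_tasks_ and signals the semaphore, so
                     : // CancelAndWaitForPendingTasks() (below) can wait for exactly the tasks
                     : // it fails to abort:
                     : //
                     : //   post:    active_unmapping_tasks_++;
                     : //            task_ids_[pending_unmapping_tasks_++] = task->id();
                     : //   finish:  active_unmapping_tasks_--;
                     : //            pending_unmapping_tasks_semaphore_.Signal();
                     : //   reclaim: for each id: TryAbort(id), or semaphore Wait() if the
                     : //            task already started running.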
     291             : 
     292      222074 : void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
     293      412727 :   for (int i = 0; i < pending_unmapping_tasks_; i++) {
     294      381306 :     if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
     295             :         TryAbortResult::kTaskAborted) {
     296      178926 :       pending_unmapping_tasks_semaphore_.Wait();
     297             :     }
     298             :   }
     299      222074 :   pending_unmapping_tasks_ = 0;
     300             :   active_unmapping_tasks_ = 0;
     301             : 
     302      222074 :   if (FLAG_trace_unmapper) {
     303             :     PrintIsolate(
     304           0 :         heap_->isolate(),
     305           0 :         "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
     306             :   }
     307      222074 : }
     308             : 
     309       83492 : void MemoryAllocator::Unmapper::PrepareForMarkCompact() {
     310       83492 :   CancelAndWaitForPendingTasks();
     311             :   // Free non-regular chunks because they cannot be re-used.
     312       83492 :   PerformFreeMemoryOnQueuedNonRegularChunks();
     313       83492 : }
     314             : 
     315       73002 : void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
     316       73002 :   CancelAndWaitForPendingTasks();
     317       73002 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     318       73003 : }
     319             : 
     320      190659 : bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
     321             :   DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
     322             : 
     323      190659 :   if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
     324             :     // All previous unmapping tasks have been run to completion.
     325             :     // Finalize those tasks to make room for new ones.
     326       65580 :     CancelAndWaitForPendingTasks();
     327             :   }
     328      190659 :   return pending_unmapping_tasks_ != kMaxUnmapperTasks;
     329             : }
     330             : 
     331      479310 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
     332             :   MemoryChunk* chunk = nullptr;
     333      965703 :   while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
     334        7083 :     allocator_->PerformFreeMemory(chunk);
     335             :   }
     336      479325 : }
     337             : 
     338             : template <MemoryAllocator::Unmapper::FreeMode mode>
     339      395554 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     340             :   MemoryChunk* chunk = nullptr;
     341      395554 :   if (FLAG_trace_unmapper) {
     342           0 :     PrintIsolate(
     343           0 :         heap_->isolate(),
     344             :         "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
     345           0 :         NumberOfChunks());
     346             :   }
     347             :   // Regular chunks.
     348      627442 :   while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
     349             :     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
     350      231839 :     allocator_->PerformFreeMemory(chunk);
     351      231840 :     if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
     352             :   }
     353             :   if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
     354             :     // The previous loop uncommitted any pages marked as pooled and added
     355             :     // them to the pooled list. In the kReleasePooled case we additionally
     356             :     // need to free them here.
     357      334584 :     while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
     358      197696 :       allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
     359             :     }
     360             :   }
     361      395835 :   PerformFreeMemoryOnQueuedNonRegularChunks();
     362      395834 : }
     363             : 
     364       63885 : void MemoryAllocator::Unmapper::TearDown() {
     365       63885 :   CHECK_EQ(0, pending_unmapping_tasks_);
     366       63885 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     367             :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     368             :     DCHECK(chunks_[i].empty());
     369             :   }
     370       63885 : }
     371             : 
     372           0 : size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
     373           0 :   base::MutexGuard guard(&mutex_);
     374           0 :   return chunks_[kRegular].size() + chunks_[kNonRegular].size();
     375             : }
     376             : 
     377           5 : int MemoryAllocator::Unmapper::NumberOfChunks() {
     378           5 :   base::MutexGuard guard(&mutex_);
     379             :   size_t result = 0;
     380          20 :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     381          30 :     result += chunks_[i].size();
     382             :   }
     383          10 :   return static_cast<int>(result);
     384             : }
     385             : 
     386           0 : size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
     387           0 :   base::MutexGuard guard(&mutex_);
     388             : 
     389             :   size_t sum = 0;
     390             :   // kPooled chunks are already uncommitted. We only have to account for
     391             :   // kRegular and kNonRegular chunks.
     392           0 :   for (auto& chunk : chunks_[kRegular]) {
     393           0 :     sum += chunk->size();
     394             :   }
     395           0 :   for (auto& chunk : chunks_[kNonRegular]) {
     396           0 :     sum += chunk->size();
     397             :   }
     398           0 :   return sum;
     399             : }
     400             : 
     401       28738 : bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
     402             :   Address base = reservation->address();
     403             :   size_t size = reservation->size();
     404       28738 :   if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
     405             :     return false;
     406             :   }
     407       28738 :   UpdateAllocatedSpaceLimits(base, base + size);
     408       57476 :   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
     409       28738 :   return true;
     410             : }
     411             : 
     412      217933 : bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
     413             :   size_t size = reservation->size();
     414      217934 :   if (!reservation->SetPermissions(reservation->address(), size,
     415      217933 :                                    PageAllocator::kNoAccess)) {
     416             :     return false;
     417             :   }
     418      435868 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
     419      217933 :   return true;
     420             : }
     421             : 
     422      260564 : void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
     423             :                                  Address base, size_t size) {
     424      260564 :   CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
     425      260564 : }
     426             : 
     427      700251 : Address MemoryAllocator::AllocateAlignedMemory(
     428             :     size_t reserve_size, size_t commit_size, size_t alignment,
     429      700251 :     Executability executable, void* hint, VirtualMemory* controller) {
     430             :   v8::PageAllocator* page_allocator = this->page_allocator(executable);
     431             :   DCHECK(commit_size <= reserve_size);
     432      700251 :   VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
     433      700251 :   if (!reservation.IsReserved()) return kNullAddress;
     434             :   Address base = reservation.address();
     435             :   size_ += reservation.size();
     436             : 
     437      700251 :   if (executable == EXECUTABLE) {
     438      134374 :     if (!CommitExecutableMemory(&reservation, base, commit_size,
     439      134374 :                                 reserve_size)) {
     440             :       base = kNullAddress;
     441             :     }
     442             :   } else {
     443      565877 :     if (reservation.SetPermissions(base, commit_size,
     444             :                                    PageAllocator::kReadWrite)) {
     445      565877 :       UpdateAllocatedSpaceLimits(base, base + commit_size);
     446             :     } else {
     447             :       base = kNullAddress;
     448             :     }
     449             :   }
     450             : 
     451      700251 :   if (base == kNullAddress) {
     452             :     // Failed to commit the body. Free the mapping and any partially committed
     453             :     // regions inside it.
     454           0 :     reservation.Free();
     455             :     size_ -= reserve_size;
     456           0 :     return kNullAddress;
     457             :   }
     458             : 
     459      700251 :   controller->TakeControl(&reservation);
     460      700251 :   return base;
     461             : }
     462             : 
     463     7280133 : void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
     464             :   base::AddressRegion memory_area =
     465     7280133 :       MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
     466     7280101 :   if (memory_area.size() != 0) {
     467      126010 :     MemoryAllocator* memory_allocator = heap_->memory_allocator();
     468             :     v8::PageAllocator* page_allocator =
     469             :         memory_allocator->page_allocator(executable());
     470       63005 :     CHECK(page_allocator->DiscardSystemPages(
     471             :         reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
     472             :   }
     473     7280098 : }
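                     : 
                     : // Editorial worked example (assuming ComputeDiscardMemoryArea() rounds
                     : // the unused region inward to commit-page boundaries; the emptiness
                     : // check above implies that small regions yield nothing):
                     : //
                     : //   With a 4 KB commit page size, addr = base + 0x100, size = 0x3000:
                     : //   discardable = [RoundUp(addr, 4K), RoundDown(addr + size, 4K)),
                     : //   so two full 4 KB pages reach DiscardSystemPages().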
     474             : 
     475     9186103 : size_t MemoryChunkLayout::CodePageGuardStartOffset() {
     476             :   // We are guarding code pages: the first OS page after the header
     477             :   // will be protected as non-writable.
     478     9186090 :   return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
     479             : }
     480             : 
     481         500 : size_t MemoryChunkLayout::CodePageGuardSize() {
     482     9186599 :   return MemoryAllocator::GetCommitPageSize();
     483             : }
     484             : 
     485     8917356 : intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
     486             :   // We are guarding code pages: the first OS page after the header
     487             :   // will be protected as non-writable.
     488    17834708 :   return CodePageGuardStartOffset() + CodePageGuardSize();
     489             : }
     490             : 
     491           0 : intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
     492             :   // We are guarding code pages: the last OS page will be protected as
     493             :   // non-writable.
     494      565331 :   return Page::kPageSize -
     495      565331 :          static_cast<int>(MemoryAllocator::GetCommitPageSize());
     496             : }
     497             : 
     498      565330 : size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
     499      565331 :   size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
     500             :   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
     501      565331 :   return memory;
     502             : }
     503             : 
     504           5 : intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
     505           5 :   return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
     506             : }
     507             : 
     508        1000 : size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
     509             :     AllocationSpace space) {
     510       29738 :   if (space == CODE_SPACE) {
     511         500 :     return ObjectStartOffsetInCodePage();
     512             :   }
     513             :   return ObjectStartOffsetInDataPage();
     514             : }
     515             : 
     516      619802 : size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
     517             :   size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
     518             :   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
     519      619802 :   return memory;
     520             : }
     521             : 
     522     1286130 : size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
     523             :     AllocationSpace space) {
     524     1784031 :   if (space == CODE_SPACE) {
     525      565331 :     return AllocatableMemoryInCodePage();
     526             :   }
     527             :   return AllocatableMemoryInDataPage();
     528             : }
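                     : 
                     : // Editorial worked example (assumed numbers: 256 KB pages, a 4 KB
                     : // commit page size, and a header that rounds up to one commit page):
                     : //
                     : //   code page: ObjectStartOffsetInCodePage = 4 KB header + 4 KB guard
                     : //                                           = 8 KB
                     : //              ObjectEndOffsetInCodePage   = 256 KB - 4 KB = 252 KB
                     : //              AllocatableMemoryInCodePage = 252 KB - 8 KB = 244 KB
                     : //   data page: AllocatableMemoryInDataPage =
                     : //                  256 KB - RoundUp(kHeaderSize, kTaggedSize)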
     529             : 
     530           0 : Heap* MemoryChunk::synchronized_heap() {
     531             :   return reinterpret_cast<Heap*>(
     532           0 :       base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
     533             : }
     534             : 
     535           0 : void MemoryChunk::InitializationMemoryFence() {
     536             :   base::SeqCst_MemoryFence();
     537             : #ifdef THREAD_SANITIZER
     538             :   // Since TSAN does not process memory fences, we use the following annotation
     539             :   // to tell TSAN that there is no data race when emitting an
     540             :   // InitializationMemoryFence. Note that the other thread still needs to
     541             :   // perform MemoryChunk::synchronized_heap().
     542             :   base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
     543             :                       reinterpret_cast<base::AtomicWord>(heap_));
     544             : #endif
     545           0 : }
     546             : 
     547     4082063 : void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     548             :     PageAllocator::Permission permission) {
     549             :   DCHECK(permission == PageAllocator::kRead ||
     550             :          permission == PageAllocator::kReadExecute);
     551             :   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
     552             :   DCHECK(owner()->identity() == CODE_SPACE ||
     553             :          owner()->identity() == CODE_LO_SPACE);
     554             :   // Decrementing the write_unprotect_counter_ and changing the page
     555             :   // protection mode must happen atomically.
     556     4082063 :   base::MutexGuard guard(page_protection_change_mutex_);
     557     4082064 :   if (write_unprotect_counter_ == 0) {
     558             :     // This is a corner case that may happen when we have a
     559             :     // CodeSpaceMemoryModificationScope open and this page was newly
     560             :     // added.
     561     4082065 :     return;
     562             :   }
     563     4082064 :   write_unprotect_counter_--;
     564             :   DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
     565     4082064 :   if (write_unprotect_counter_ == 0) {
     566             :     Address protect_start =
     567     4005831 :         address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     568             :     size_t page_size = MemoryAllocator::GetCommitPageSize();
     569             :     DCHECK(IsAligned(protect_start, page_size));
     570             :     size_t protect_size = RoundUp(area_size(), page_size);
     571     4005829 :     CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
     572             :   }
     573             : }
     574             : 
     575           0 : void MemoryChunk::SetReadable() {
     576           0 :   DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
     577           0 : }
     578             : 
     579     3293276 : void MemoryChunk::SetReadAndExecutable() {
     580             :   DCHECK(!FLAG_jitless);
     581             :   DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     582     4082063 :       PageAllocator::kReadExecute);
     583     3293277 : }
     584             : 
     585     4018822 : void MemoryChunk::SetReadAndWritable() {
     586             :   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
     587             :   DCHECK(owner()->identity() == CODE_SPACE ||
     588             :          owner()->identity() == CODE_LO_SPACE);
     589             :   // Incrementing the write_unprotect_counter_ and changing the page
     590             :   // protection mode must happen atomically.
     591     4018822 :   base::MutexGuard guard(page_protection_change_mutex_);
     592     4018833 :   write_unprotect_counter_++;
     593             :   DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
     594     4018833 :   if (write_unprotect_counter_ == 1) {
     595             :     Address unprotect_start =
     596     3942576 :         address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     597             :     size_t page_size = MemoryAllocator::GetCommitPageSize();
     598             :     DCHECK(IsAligned(unprotect_start, page_size));
     599             :     size_t unprotect_size = RoundUp(area_size(), page_size);
     600     3942568 :     CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
     601             :                                       PageAllocator::kReadWrite));
     602             :   }
     603     4018834 : }
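                     : 
                     : // Editorial sketch: the counter above is meant to be driven in matched
                     : // pairs by a scope object (V8's CodeSpaceMemoryModificationScope plays
                     : // this role; the class below is a simplified stand-in, not the real
                     : // API):
                     : //
                     : //   class WritableCodePageScope {
                     : //    public:
                     : //     explicit WritableCodePageScope(MemoryChunk* chunk) : chunk_(chunk) {
                     : //       chunk_->SetReadAndWritable();    // counter 0 -> 1 unprotects
                     : //     }
                     : //     ~WritableCodePageScope() {
                     : //       chunk_->SetReadAndExecutable();  // counter 1 -> 0 reprotects
                     : //     }
                     : //    private:
                     : //     MemoryChunk* chunk_;
                     : //   };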
     604             : 
     605             : namespace {
     606             : 
     607             : PageAllocator::Permission DefaultWritableCodePermissions() {
     608             :   return FLAG_jitless ? PageAllocator::kReadWrite
     609           0 :                       : PageAllocator::kReadWriteExecute;
     610             : }
     611             : 
     612             : }  // namespace
     613             : 
     614      997736 : MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
     615             :                                      Address area_start, Address area_end,
     616      728989 :                                      Executability executable, Space* owner,
     617             :                                      VirtualMemory reservation) {
     618             :   MemoryChunk* chunk = FromAddress(base);
     619             : 
     620             :   DCHECK_EQ(base, chunk->address());
     621             : 
     622      728988 :   chunk->heap_ = heap;
     623      728988 :   chunk->size_ = size;
     624      728988 :   chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
     625             :   DCHECK(HasHeaderSentinel(area_start));
     626      728988 :   chunk->area_start_ = area_start;
     627      728988 :   chunk->area_end_ = area_end;
     628      728988 :   chunk->flags_ = Flags(NO_FLAGS);
     629             :   chunk->set_owner(owner);
     630             :   chunk->InitializeReservedMemory();
     631      728989 :   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
     632      728989 :   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
     633             :   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
     634      728989 :                                        nullptr);
     635             :   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
     636      728989 :                                        nullptr);
     637      728989 :   chunk->invalidated_slots_ = nullptr;
     638      728989 :   chunk->skip_list_ = nullptr;
     639             :   chunk->progress_bar_ = 0;
     640      728989 :   chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
     641             :   chunk->set_concurrent_sweeping_state(kSweepingDone);
     642      728989 :   chunk->page_protection_change_mutex_ = new base::Mutex();
     643      728988 :   chunk->write_unprotect_counter_ = 0;
     644      728988 :   chunk->mutex_ = new base::Mutex();
     645      728989 :   chunk->allocated_bytes_ = chunk->area_size();
     646      728989 :   chunk->wasted_memory_ = 0;
     647      728989 :   chunk->young_generation_bitmap_ = nullptr;
     648      728989 :   chunk->marking_bitmap_ = nullptr;
     649      728989 :   chunk->local_tracker_ = nullptr;
     650             : 
     651             :   chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
     652             :       0;
     653             :   chunk->external_backing_store_bytes_
     654             :       [ExternalBackingStoreType::kExternalString] = 0;
     655             : 
     656     5102923 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     657     4373934 :     chunk->categories_[i] = nullptr;
     658             :   }
     659             : 
     660             :   chunk->AllocateMarkingBitmap();
     661      728989 :   if (owner->identity() == RO_SPACE) {
     662             :     heap->incremental_marking()
     663             :         ->non_atomic_marking_state()
     664             :         ->bitmap(chunk)
     665       62883 :         ->MarkAllBits();
     666             :   } else {
     667             :     heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
     668             :                                                                           0);
     669             :   }
     670             : 
     671             :   DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
     672             : 
     673      728989 :   if (executable == EXECUTABLE) {
     674             :     chunk->SetFlag(IS_EXECUTABLE);
     675      134374 :     if (heap->write_protect_code_memory()) {
     676             :       chunk->write_unprotect_counter_ =
     677      134374 :           heap->code_space_memory_modification_scope_depth();
     678             :     } else {
     679             :       size_t page_size = MemoryAllocator::GetCommitPageSize();
     680             :       DCHECK(IsAligned(area_start, page_size));
     681           0 :       size_t area_size = RoundUp(area_end - area_start, page_size);
     682           0 :       CHECK(reservation.SetPermissions(area_start, area_size,
     683             :                                        DefaultWritableCodePermissions()));
     684             :     }
     685             :   }
     686             : 
     687             :   chunk->reservation_ = std::move(reservation);
     688             : 
     689      728989 :   return chunk;
     690             : }
     691             : 
     692      443978 : Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
     693             :   Page* page = static_cast<Page*>(chunk);
     694             :   DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
     695             :                 page->owner()->identity()),
     696             :             page->area_size());
     697             :   // Make sure that categories are initialized before freeing the area.
     698             :   page->ResetAllocatedBytes();
     699      443978 :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
     700      443978 :   page->AllocateFreeListCategories();
     701             :   page->InitializeFreeListCategories();
     702             :   page->list_node().Initialize();
     703             :   page->InitializationMemoryFence();
     704      443978 :   return page;
     705             : }
     706             : 
     707      226987 : Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
     708             :   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
     709             :   bool in_to_space = (id() != kFromSpace);
     710             :   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
     711      226987 :                              : MemoryChunk::IN_FROM_SPACE);
     712             :   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
     713             :                                        : MemoryChunk::IN_TO_SPACE));
     714             :   Page* page = static_cast<Page*>(chunk);
     715      226987 :   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
     716      226987 :   page->AllocateLocalTracker();
     717             :   page->list_node().Initialize();
     718             : #ifdef ENABLE_MINOR_MC
     719      226987 :   if (FLAG_minor_mc) {
     720             :     page->AllocateYoungGenerationBitmap();
     721             :     heap()
     722             :         ->minor_mark_compact_collector()
     723             :         ->non_atomic_marking_state()
     724           0 :         ->ClearLiveness(page);
     725             :   }
     726             : #endif  // ENABLE_MINOR_MC
     727             :   page->InitializationMemoryFence();
     728      226987 :   return page;
     729             : }
     730             : 
     731      286900 : LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
     732             :                                  Executability executable) {
     733       97787 :   if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
     734             :     STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
     735           0 :     FATAL("Code page is too large.");
     736             :   }
     737             : 
     738             :   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
     739             : 
     740             :   // Initialize the sentinel value for each page boundary since the mutator
     741             :   // may initialize the object starting from its end.
     742             :   Address sentinel = chunk->address() + MemoryChunk::kHeaderSentinelOffset +
     743       57606 :                      MemoryChunk::kPageSize;
     744      246719 :   while (sentinel < chunk->area_end()) {
     745      131507 :     *reinterpret_cast<intptr_t*>(sentinel) = kNullAddress;
     746      131507 :     sentinel += MemoryChunk::kPageSize;
     747             :   }
     748             : 
     749             :   LargePage* page = static_cast<LargePage*>(chunk);
     750             :   page->list_node().Initialize();
     751       57606 :   return page;
     752             : }
     753             : 
     754      443980 : void Page::AllocateFreeListCategories() {
     755     3107844 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     756             :     categories_[i] = new FreeListCategory(
     757     5327730 :         reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
     758             :   }
     759      443978 : }
     760             : 
     761          47 : void Page::InitializeFreeListCategories() {
     762     2664197 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     763     2664150 :     categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
     764             :   }
     765          47 : }
     766             : 
     767      671288 : void Page::ReleaseFreeListCategories() {
     768     4698978 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     769     4027686 :     if (categories_[i] != nullptr) {
     770     2663442 :       delete categories_[i];
     771     2663446 :       categories_[i] = nullptr;
     772             :     }
     773             :   }
     774      671292 : }
     775             : 
     776         582 : Page* Page::ConvertNewToOld(Page* old_page) {
     777             :   DCHECK(old_page);
     778             :   DCHECK(old_page->InNewSpace());
     779         582 :   OldSpace* old_space = old_page->heap()->old_space();
     780             :   old_page->set_owner(old_space);
     781             :   old_page->SetFlags(0, static_cast<uintptr_t>(~0));
     782         582 :   Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
     783         582 :   old_space->AddPage(new_page);
     784         582 :   return new_page;
     785             : }
     786             : 
     787       22457 : size_t MemoryChunk::CommittedPhysicalMemory() {
     788       24836 :   if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
     789       10039 :     return size();
     790        2379 :   return high_water_mark_;
     791             : }
     792             : 
     793       11244 : bool MemoryChunk::InOldSpace() const {
     794       11244 :   return owner()->identity() == OLD_SPACE;
     795             : }
     796             : 
     797           0 : bool MemoryChunk::InLargeObjectSpace() const {
     798           0 :   return owner()->identity() == LO_SPACE;
     799             : }
     800             : 
     801      700247 : MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     802             :                                             size_t commit_area_size,
     803             :                                             Executability executable,
     804             :                                             Space* owner) {
     805             :   DCHECK_LE(commit_area_size, reserve_area_size);
     806             : 
     807             :   size_t chunk_size;
     808     1400497 :   Heap* heap = isolate_->heap();
     809             :   Address base = kNullAddress;
     810      700247 :   VirtualMemory reservation;
     811             :   Address area_start = kNullAddress;
     812             :   Address area_end = kNullAddress;
     813             :   void* address_hint =
     814             :       AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
     815             : 
     816             :   //
     817             :   // MemoryChunk layout:
     818             :   //
     819             :   //             Executable
     820             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     821             :   // |           Header           |
     822             :   // +----------------------------+<- base + CodePageGuardStartOffset
     823             :   // |           Guard            |
     824             :   // +----------------------------+<- area_start_
     825             :   // |           Area             |
     826             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     827             :   // |   Committed but not used   |
     828             :   // +----------------------------+<- aligned at OS page boundary
     829             :   // | Reserved but not committed |
     830             :   // +----------------------------+<- aligned at OS page boundary
     831             :   // |           Guard            |
     832             :   // +----------------------------+<- base + chunk_size
     833             :   //
     834             :   //           Non-executable
     835             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     836             :   // |          Header            |
     837             :   // +----------------------------+<- area_start_ (base + ObjectStartOffsetInDataPage)
     838             :   // |           Area             |
     839             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     840             :   // |  Committed but not used    |
     841             :   // +----------------------------+<- aligned at OS page boundary
     842             :   // | Reserved but not committed |
     843             :   // +----------------------------+<- base + chunk_size
     844             :   //
     845             : 
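                     :   // Editorial worked example (assumed: 4 KB commit pages, an 8 KB
                     :   // ObjectStartOffsetInCodePage() and a 4 KB CodePageGuardSize(), with
                     :   // reserve_area_size == 96 KB):
                     :   //
                     :   //   executable chunk_size = RoundUp(8K + 96K + 4K, 4K) = 108 KB
                     :   //   (header + leading guard, then the area, then the trailing guard).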
     846      700251 :   if (executable == EXECUTABLE) {
     847      134374 :     chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
     848             :                                reserve_area_size +
     849             :                                MemoryChunkLayout::CodePageGuardSize(),
     850      134373 :                            GetCommitPageSize());
     851             : 
     852             :     // Size of header (not executable) plus area (executable).
     853             :     size_t commit_size = ::RoundUp(
     854      134373 :         MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
     855      134374 :         GetCommitPageSize());
     856             :     base =
     857             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     858      134374 :                               executable, address_hint, &reservation);
     859      134374 :     if (base == kNullAddress) return nullptr;
     860             :     // Update executable memory size.
     861             :     size_executable_ += reservation.size();
     862             : 
     863             :     if (Heap::ShouldZapGarbage()) {
     864             :       ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
     865             :       ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
     866             :                commit_area_size, kZapValue);
     867             :     }
     868             : 
     869      134374 :     area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     870      134374 :     area_end = area_start + commit_area_size;
     871             :   } else {
     872             :     chunk_size = ::RoundUp(
     873             :         MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
     874      565877 :         GetCommitPageSize());
     875             :     size_t commit_size = ::RoundUp(
     876             :         MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
     877      565877 :         GetCommitPageSize());
     878             :     base =
     879             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     880      565877 :                               executable, address_hint, &reservation);
     881             : 
     882      565877 :     if (base == kNullAddress) return nullptr;
     883             : 
     884             :     if (Heap::ShouldZapGarbage()) {
     885             :       ZapBlock(
     886             :           base,
     887             :           MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
     888             :           kZapValue);
     889             :     }
     890             : 
     891      565877 :     area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
     892      565877 :     area_end = area_start + commit_area_size;
     893             :   }
     894             : 
     895             :   // Use chunk_size for statistics and callbacks because we assume that they
     896             :   // treat reserved but not-yet committed memory regions of chunks as allocated.
     897             :   isolate_->counters()->memory_allocated()->Increment(
     898     1400502 :       static_cast<int>(chunk_size));
     899             : 
     900     1400500 :   LOG(isolate_,
     901             :       NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
     902             : 
     903             :   // We cannot use the last chunk in the address space because we would
     904             :   // overflow when comparing top and limit if this chunk is used for a
     905             :   // linear allocation area.
     906      700250 :   if ((base + chunk_size) == 0u) {
     907           0 :     CHECK(!last_chunk_.IsReserved());
     908           0 :     last_chunk_.TakeControl(&reservation);
     909           0 :     UncommitMemory(&last_chunk_);
     910             :     size_ -= chunk_size;
     911           0 :     if (executable == EXECUTABLE) {
     912             :       size_executable_ -= chunk_size;
     913             :     }
     914           0 :     CHECK(last_chunk_.IsReserved());
     915             :     return AllocateChunk(reserve_area_size, commit_area_size, executable,
     916           0 :                          owner);
     917             :   }
     918             : 
     919             :   MemoryChunk* chunk =
     920             :       MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
     921      700250 :                               executable, owner, std::move(reservation));
     922             : 
     923      700251 :   if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
     924      700251 :   return chunk;
     925             : }
     926             : 
     927      357909 : void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
     928      859493 :   if (is_marking) {
     929             :     SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     930             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     931             :     SetFlag(MemoryChunk::INCREMENTAL_MARKING);
     932             :   } else {
     933             :     ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     934             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     935             :     ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
     936             :   }
     937      357909 : }
     938             : 
     939      164456 : void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
     940             :   SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     941      391443 :   if (is_marking) {
     942             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     943             :     SetFlag(MemoryChunk::INCREMENTAL_MARKING);
     944             :   } else {
     945             :     ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     946             :     ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
     947             :   }
     948      164456 : }
     949             : 
     950     1495270 : void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
     951             : 
     952      243038 : void Page::AllocateLocalTracker() {
     953             :   DCHECK_NULL(local_tracker_);
     954      486077 :   local_tracker_ = new LocalArrayBufferTracker(this);
     955      243039 : }
     956             : 
     957       16304 : bool Page::contains_array_buffers() {
     958       32161 :   return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
     959             : }
     960             : 
     961           0 : void Page::ResetFreeListStatistics() {
     962      540326 :   wasted_memory_ = 0;
     963           0 : }
     964             : 
     965           0 : size_t Page::AvailableInFreeList() {
     966           0 :   size_t sum = 0;
     967           0 :   ForAllFreeListCategories([&sum](FreeListCategory* category) {
     968           0 :     sum += category->available();
     969             :   });
     970           0 :   return sum;
     971             : }
     972             : 
     973             : #ifdef DEBUG
     974             : namespace {
     975             : // Skips fillers, starting from the given filler, until the end address.
     976             : // Returns the first address after the skipped fillers.
     977             : Address SkipFillers(HeapObject filler, Address end) {
     978             :   Address addr = filler->address();
     979             :   while (addr < end) {
     980             :     filler = HeapObject::FromAddress(addr);
     981             :     CHECK(filler->IsFiller());
     982             :     addr = filler->address() + filler->Size();
     983             :   }
     984             :   return addr;
     985             : }
     986             : }  // anonymous namespace
     987             : #endif  // DEBUG
     988             : 
     989      188346 : size_t Page::ShrinkToHighWaterMark() {
     990             :   // Shrinking only makes sense outside of the CodeRange, where we don't care
     991             :   // about address space fragmentation.
     992     1694934 :   VirtualMemory* reservation = reserved_memory();
     993      188346 :   if (!reservation->IsReserved()) return 0;
     994             : 
     995             :   // Shrink pages to high water mark. The water mark points either to a filler
     996             :   // or the area_end.
     997      376692 :   HeapObject filler = HeapObject::FromAddress(HighWaterMark());
     998      188346 :   if (filler->address() == area_end()) return 0;
     999      188341 :   CHECK(filler->IsFiller());
    1000             :   // Ensure that no objects were allocated in the [filler, area_end) region.
    1001             :   DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
    1002             :   // Ensure that no objects will be allocated on this page.
    1003             :   DCHECK_EQ(0u, AvailableInFreeList());
    1004             : 
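                     :   // Worked example (assumed numbers): with a 4 KB commit page size and a
                     :   // free tail of 10000 bytes past the high water mark, the computation
                     :   // below yields unused = RoundDown(10000, 4096) = 8192, i.e. two whole
                     :   // pages are released and the remaining 1808 bytes stay covered by the
                     :   // filler that CreateFillerObjectAt re-creates.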
    1005             :   size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
    1006      188341 :                             MemoryAllocator::GetCommitPageSize());
    1007      188341 :   if (unused > 0) {
    1008             :     DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    1009      188321 :     if (FLAG_trace_gc_verbose) {
    1010             :       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
    1011             :                    reinterpret_cast<void*>(this),
    1012             :                    reinterpret_cast<void*>(area_end()),
    1013           0 :                    reinterpret_cast<void*>(area_end() - unused));
    1014             :     }
    1015             :     heap()->CreateFillerObjectAt(
    1016             :         filler->address(),
    1017             :         static_cast<int>(area_end() - filler->address() - unused),
    1018      564963 :         ClearRecordedSlots::kNo);
    1019             :     heap()->memory_allocator()->PartialFreeMemory(
    1020      564963 :         this, address() + size() - unused, unused, area_end() - unused);
    1021      188321 :     if (filler->address() != area_end()) {
    1022      188321 :       CHECK(filler->IsFiller());
    1023      188321 :       CHECK_EQ(filler->address() + filler->Size(), area_end());
    1024             :     }
    1025             :   }
    1026      188341 :   return unused;
    1027             : }
    1028             : 
    1029      184680 : void Page::CreateBlackArea(Address start, Address end) {
    1030             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1031             :   DCHECK_EQ(Page::FromAddress(start), this);
    1032             :   DCHECK_NE(start, end);
    1033             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
    1034             :   IncrementalMarking::MarkingState* marking_state =
    1035      184680 :       heap()->incremental_marking()->marking_state();
    1036             :   marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
    1037      369360 :                                         AddressToMarkbitIndex(end));
    1038      184680 :   marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
    1039      184680 : }
    1040             : 
    1041       12776 : void Page::DestroyBlackArea(Address start, Address end) {
    1042             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1043             :   DCHECK_EQ(Page::FromAddress(start), this);
    1044             :   DCHECK_NE(start, end);
    1045             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
    1046             :   IncrementalMarking::MarkingState* marking_state =
    1047       12776 :       heap()->incremental_marking()->marking_state();
    1048             :   marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
    1049       25552 :                                           AddressToMarkbitIndex(end));
    1050       12776 :   marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
    1051       12776 : }
    1052             : 
    1053      188372 : void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
    1054             :                                         size_t bytes_to_free,
    1055             :                                         Address new_area_end) {
    1056      188372 :   VirtualMemory* reservation = chunk->reserved_memory();
    1057             :   DCHECK(reservation->IsReserved());
    1058      188372 :   chunk->size_ -= bytes_to_free;
    1059      188372 :   chunk->area_end_ = new_area_end;
    1060      188372 :   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    1061             :     // Add guard page at the end.
    1062       62772 :     size_t page_size = GetCommitPageSize();
    1063             :     DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
    1064             :     DCHECK_EQ(chunk->address() + chunk->size(),
    1065             :               chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
    1066             :     reservation->SetPermissions(chunk->area_end_, page_size,
    1067       62772 :                                 PageAllocator::kNoAccess);
    1068             :   }
    1069             :   // On e.g. Windows, a reservation may be larger than a page, and a partial
    1070             :   // release starting at |start_free| will also release the potentially
    1071             :   // unused part behind the current page.
    1072      188372 :   const size_t released_bytes = reservation->Release(start_free);
    1073             :   DCHECK_GE(size_, released_bytes);
    1074             :   size_ -= released_bytes;
    1075             :   isolate_->counters()->memory_allocated()->Decrement(
    1076      376744 :       static_cast<int>(released_bytes));
    1077      188372 : }
    1078             : 
    1079      791766 : void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
    1080             :   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
    1081      728899 :   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
    1082             : 
    1083             :   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
    1084      728899 :                                          chunk->IsEvacuationCandidate());
    1085             : 
    1086             :   VirtualMemory* reservation = chunk->reserved_memory();
    1087             :   const size_t size =
    1088      728898 :       reservation->IsReserved() ? reservation->size() : chunk->size();
    1089             :   DCHECK_GE(size_, static_cast<size_t>(size));
    1090             :   size_ -= size;
    1091     1457796 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
    1092      728898 :   if (chunk->executable() == EXECUTABLE) {
    1093             :     DCHECK_GE(size_executable_, size);
    1094             :     size_executable_ -= size;
    1095             :   }
    1096             : 
    1097             :   chunk->SetFlag(MemoryChunk::PRE_FREED);
    1098             : 
    1099      728898 :   if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
    1100      728898 : }
    1101             : 
    1102             : 
    1103      846133 : void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
    1104             :   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
    1105      720397 :   chunk->ReleaseAllocatedMemory();
    1106             : 
    1107      720397 :   VirtualMemory* reservation = chunk->reserved_memory();
    1108      720397 :   if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    1109      217934 :     UncommitMemory(reservation);
    1110             :   } else {
    1111      502463 :     if (reservation->IsReserved()) {
    1112      439595 :       reservation->Free();
    1113             :     } else {
    1114             :       // Only read-only pages can have a non-initialized reservation object.
    1115             :       DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
    1116             :       FreeMemory(page_allocator(chunk->executable()), chunk->address(),
    1117       62868 :                  chunk->size());
    1118             :     }
    1119             :   }
    1120      720399 : }
    1121             : 
    1122             : template <MemoryAllocator::FreeMode mode>
    1123      926595 : void MemoryAllocator::Free(MemoryChunk* chunk) {
    1124             :   switch (mode) {
    1125             :     case kFull:
    1126      481476 :       PreFreeMemory(chunk);
    1127      481476 :       PerformFreeMemory(chunk);
    1128             :       break;
    1129             :     case kAlreadyPooled:
    1130             :       // Pooled pages cannot be touched anymore as their memory is uncommitted.
    1131             :       // Pooled pages are non-executable.
    1132      197696 :       FreeMemory(data_page_allocator(), chunk->address(),
    1133             :                  static_cast<size_t>(MemoryChunk::kPageSize));
    1134             :       break;
    1135             :     case kPooledAndQueue:
    1136             :       DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
    1137             :       DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
    1138             :       chunk->SetFlag(MemoryChunk::POOLED);
    1139             :       V8_FALLTHROUGH;
    1140             :     case kPreFreeAndQueue:
    1141      247423 :       PreFreeMemory(chunk);
    1142             :       // The chunks added to this queue will be freed by a concurrent thread.
    1143      247423 :       unmapper()->AddMemoryChunkSafe(chunk);
    1144             :       break;
    1145             :   }
    1146      926595 : }
    1147             : 
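                     : // In brief, the four modes handled above:
                     : //   kFull            - pre-free and release the memory synchronously.
                     : //   kAlreadyPooled   - the page body is already uncommitted; only the
                     : //                      fixed-size address range is released.
                     : //   kPooledAndQueue  - tag the chunk POOLED, then fall through and queue it.
                     : //   kPreFreeAndQueue - pre-free now; a concurrent unmapper thread frees it.
                     : 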
    1148             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1149             :     MemoryAllocator::kFull>(MemoryChunk* chunk);
    1150             : 
    1151             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1152             :     MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
    1153             : 
    1154             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1155             :     MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
    1156             : 
    1157             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1158             :     MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
    1159             : 
    1160             : template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
    1161      670380 : Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
    1162             :                                     Executability executable) {
    1163             :   MemoryChunk* chunk = nullptr;
    1164             :   if (alloc_mode == kPooled) {
    1165             :     DCHECK_EQ(size, static_cast<size_t>(
    1166             :                         MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    1167             :                             owner->identity())));
    1168             :     DCHECK_EQ(executable, NOT_EXECUTABLE);
    1169      226986 :     chunk = AllocatePagePooled(owner);
    1170             :   }
    1171      226987 :   if (chunk == nullptr) {
    1172      641643 :     chunk = AllocateChunk(size, size, executable, owner);
    1173             :   }
    1174      670383 :   if (chunk == nullptr) return nullptr;
    1175      670383 :   return owner->InitializePage(chunk, executable);
    1176             : }
    1177             : 
    1178             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1179             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    1180             :         size_t size, PagedSpace* owner, Executability executable);
    1181             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1182             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    1183             :         size_t size, SemiSpace* owner, Executability executable);
    1184             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1185             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    1186             :         size_t size, SemiSpace* owner, Executability executable);
    1187             : 
    1188       57606 : LargePage* MemoryAllocator::AllocateLargePage(size_t size,
    1189             :                                               LargeObjectSpace* owner,
    1190             :                                               Executability executable) {
    1191       57606 :   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
    1192       57606 :   if (chunk == nullptr) return nullptr;
    1193       57606 :   return LargePage::Initialize(isolate_->heap(), chunk, executable);
    1194             : }
    1195             : 
    1196             : template <typename SpaceType>
    1197      255724 : MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
    1198      226986 :   MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
    1199      226987 :   if (chunk == nullptr) return nullptr;
    1200             :   const int size = MemoryChunk::kPageSize;
    1201       28738 :   const Address start = reinterpret_cast<Address>(chunk);
    1202             :   const Address area_start =
    1203             :       start +
    1204       57476 :       MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
    1205       28738 :   const Address area_end = start + size;
    1206             :   // Pooled pages are always regular data pages.
    1207             :   DCHECK_NE(CODE_SPACE, owner->identity());
    1208             :   VirtualMemory reservation(data_page_allocator(), start, size);
    1209       28738 :   if (!CommitMemory(&reservation)) return nullptr;
    1210             :   if (Heap::ShouldZapGarbage()) {
    1211             :     ZapBlock(start, size, kZapValue);
    1212             :   }
    1213       28738 :   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
    1214       57476 :                           NOT_EXECUTABLE, owner, std::move(reservation));
    1215             :   size_ += size;
    1216       28738 :   return chunk;
    1217             : }
    1218             : 
    1219           0 : void MemoryAllocator::ZapBlock(Address start, size_t size,
    1220             :                                uintptr_t zap_value) {
    1221             :   DCHECK(IsAligned(start, kTaggedSize));
    1222             :   DCHECK(IsAligned(size, kTaggedSize));
    1223             :   MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
    1224           0 :                size >> kTaggedSizeLog2);
    1225           0 : }
    1226             : 
    1227           5 : intptr_t MemoryAllocator::GetCommitPageSize() {
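                     :   // Note: FLAG_v8_os_page_size is specified in KB (hence the * KB below);
                     :   // when non-zero it overrides the page size reported by the OS.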
    1228    36091407 :   if (FLAG_v8_os_page_size != 0) {
    1229             :     DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
    1230        1523 :     return FLAG_v8_os_page_size * KB;
    1231             :   } else {
    1232    36089884 :     return CommitPageSize();
    1233             :   }
    1234             : }
    1235             : 
    1236     7281300 : base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
    1237             :                                                               size_t size) {
    1238     7281305 :   size_t page_size = MemoryAllocator::GetCommitPageSize();
    1239     7281305 :   if (size < page_size + FreeSpace::kSize) {
    1240     7208824 :     return base::AddressRegion(0, 0);
    1241             :   }
    1242       72481 :   Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
    1243       72481 :   Address discardable_end = RoundDown(addr + size, page_size);
    1244       72481 :   if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
    1245             :   return base::AddressRegion(discardable_start,
    1246       63020 :                              discardable_end - discardable_start);
    1247             : }
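                     : 
                     : // Worked example for ComputeDiscardMemoryArea (assumed numbers): with a
                     : // 4 KB page size, addr = 0x10064 and size = 3 * 4096, the result is
                     : // [RoundUp(0x10064 + FreeSpace::kSize, 0x1000), RoundDown(0x13064, 0x1000))
                     : // = [0x11000, 0x13000): exactly the whole pages strictly inside the free
                     : // block. The FreeSpace header and the partial pages at both ends are kept.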
    1248             : 
    1249      134374 : bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
    1250             :                                              size_t commit_size,
    1251             :                                              size_t reserved_size) {
    1252      134374 :   const size_t page_size = GetCommitPageSize();
    1253             :   // All addresses and sizes must be aligned to the commit page size.
    1254             :   DCHECK(IsAligned(start, page_size));
    1255             :   DCHECK_EQ(0, commit_size % page_size);
    1256             :   DCHECK_EQ(0, reserved_size % page_size);
    1257             :   const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
    1258      134374 :   const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
    1259             :   const size_t code_area_offset =
    1260      134374 :       MemoryChunkLayout::ObjectStartOffsetInCodePage();
    1261             :   // reserved_size includes two guard regions, commit_size does not.
    1262             :   DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
    1263      134374 :   const Address pre_guard_page = start + pre_guard_offset;
    1264      134374 :   const Address code_area = start + code_area_offset;
    1265      134374 :   const Address post_guard_page = start + reserved_size - guard_size;
    1266             :   // Commit the non-executable header, from start to pre-code guard page.
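                     :   // Layout of the reservation after the calls below (not to scale):
                     :   //
                     :   //   | header RW | pre-guard NA | code RW ... uncommitted | post-guard NA |
                     :   //   ^start      ^pre_guard_page ^code_area              ^post_guard_page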
    1267      134374 :   if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    1268             :     // Create the pre-code guard page, following the header.
    1269      134374 :     if (vm->SetPermissions(pre_guard_page, page_size,
    1270             :                            PageAllocator::kNoAccess)) {
    1271             :       // Commit the executable code body.
    1272      134374 :       if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
    1273      134374 :                              PageAllocator::kReadWrite)) {
    1274             :         // Create the post-code guard page.
    1275      134374 :         if (vm->SetPermissions(post_guard_page, page_size,
    1276             :                                PageAllocator::kNoAccess)) {
    1277      134374 :           UpdateAllocatedSpaceLimits(start, code_area + commit_size);
    1278      134374 :           return true;
    1279             :         }
    1280           0 :         vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
    1281             :       }
    1282             :     }
    1283           0 :     vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
    1284             :   }
    1285             :   return false;
    1286             : }
    1287             : 
    1288             : 
    1289             : // -----------------------------------------------------------------------------
    1290             : // MemoryChunk implementation
    1291             : 
    1292      728893 : void MemoryChunk::ReleaseAllocatedMemory() {
    1293      728893 :   if (skip_list_ != nullptr) {
    1294       93678 :     delete skip_list_;
    1295       93678 :     skip_list_ = nullptr;
    1296             :   }
    1297      728893 :   if (mutex_ != nullptr) {
    1298      666028 :     delete mutex_;
    1299      666029 :     mutex_ = nullptr;
    1300             :   }
    1301      728894 :   if (page_protection_change_mutex_ != nullptr) {
    1302      728896 :     delete page_protection_change_mutex_;
    1303      728896 :     page_protection_change_mutex_ = nullptr;
    1304             :   }
    1305      728894 :   ReleaseSlotSet<OLD_TO_NEW>();
    1306      728894 :   ReleaseSlotSet<OLD_TO_OLD>();
    1307      728888 :   ReleaseTypedSlotSet<OLD_TO_NEW>();
    1308      728884 :   ReleaseTypedSlotSet<OLD_TO_OLD>();
    1309      728883 :   ReleaseInvalidatedSlots();
    1310      728884 :   if (local_tracker_ != nullptr) ReleaseLocalTracker();
    1311      728898 :   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
    1312      728898 :   if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
    1313             : 
    1314      728898 :   if (!heap_->IsLargeMemoryChunk(this)) {
    1315             :     Page* page = static_cast<Page*>(this);
    1316      671293 :     page->ReleaseFreeListCategories();
    1317             :   }
    1318      728897 : }
    1319             : 
    1320      101867 : static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
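                     :   // Ceiling division: a chunk of |size| bytes gets ceil(size / kPageSize)
                     :   // SlotSets, e.g. a large page spanning 2.5 pages of address space gets 3.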
    1321      101867 :   size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
    1322             :   DCHECK_LT(0, pages);
    1323      101867 :   SlotSet* slot_set = new SlotSet[pages];
    1324      205841 :   for (size_t i = 0; i < pages; i++) {
    1325      103968 :     slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
    1326             :   }
    1327      101873 :   return slot_set;
    1328             : }
    1329             : 
    1330             : template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
    1331             : template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
    1332             : 
    1333             : template <RememberedSetType type>
    1334      101872 : SlotSet* MemoryChunk::AllocateSlotSet() {
    1335      101872 :   SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
    1336             :   SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
    1337      101873 :       &slot_set_[type], nullptr, slot_set);
    1338      101873 :   if (old_slot_set != nullptr) {
    1339          64 :     delete[] slot_set;
    1340             :     slot_set = old_slot_set;
    1341             :   }
    1342             :   DCHECK(slot_set);
    1343      101873 :   return slot_set;
    1344             : }
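                     : 
                     : // The allocate-then-CAS-then-discard dance above is the usual lock-free
                     : // lazy-initialization pattern. A minimal standalone sketch of the same idea
                     : // with std::atomic (V8 uses its base::AsAtomicPointer wrapper instead):
                     : //
                     : //   #include <atomic>
                     : //   int* GetOrInit(std::atomic<int*>& slot) {
                     : //     int* fresh = new int(0);
                     : //     int* expected = nullptr;
                     : //     if (!slot.compare_exchange_strong(expected, fresh,
                     : //                                       std::memory_order_acq_rel)) {
                     : //       delete fresh;     // lost the race; the winner's object is used
                     : //       return expected;  // CAS stored the current value here on failure
                     : //     }
                     : //     return fresh;
                     : //   }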
    1345             : 
    1346             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
    1347             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
    1348             : 
    1349             : template <RememberedSetType type>
    1350     1475667 : void MemoryChunk::ReleaseSlotSet() {
    1351     1475667 :   SlotSet* slot_set = slot_set_[type];
    1352     1475667 :   if (slot_set) {
    1353      101802 :     slot_set_[type] = nullptr;
    1354      101802 :     delete[] slot_set;
    1355             :   }
    1356     1475680 : }
    1357             : 
    1358             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
    1359             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
    1360             : 
    1361             : template <RememberedSetType type>
    1362       10457 : TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
    1363       10457 :   TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
    1364             :   TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
    1365       10457 :       &typed_slot_set_[type], nullptr, typed_slot_set);
    1366       10457 :   if (old_value != nullptr) {
    1367           0 :     delete typed_slot_set;
    1368             :     typed_slot_set = old_value;
    1369             :   }
    1370             :   DCHECK(typed_slot_set);
    1371       10457 :   return typed_slot_set;
    1372             : }
    1373             : 
    1374             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
    1375             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
    1376             : 
    1377             : template <RememberedSetType type>
    1378     1460725 : void MemoryChunk::ReleaseTypedSlotSet() {
    1379     1460725 :   TypedSlotSet* typed_slot_set = typed_slot_set_[type];
    1380     1460725 :   if (typed_slot_set) {
    1381       10457 :     typed_slot_set_[type] = nullptr;
    1382       10457 :     delete typed_slot_set;
    1383             :   }
    1384     1460724 : }
    1385             : 
    1386         137 : InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
    1387             :   DCHECK_NULL(invalidated_slots_);
    1388         274 :   invalidated_slots_ = new InvalidatedSlots();
    1389         137 :   return invalidated_slots_;
    1390             : }
    1391             : 
    1392      729018 : void MemoryChunk::ReleaseInvalidatedSlots() {
    1393      729018 :   if (invalidated_slots_) {
    1394         274 :     delete invalidated_slots_;
    1395         137 :     invalidated_slots_ = nullptr;
    1396             :   }
    1397      729018 : }
    1398             : 
    1399       46958 : void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
    1400      109719 :                                                      int size) {
    1401       46958 :   if (!ShouldSkipEvacuationSlotRecording()) {
    1402       36573 :     if (invalidated_slots() == nullptr) {
    1403         137 :       AllocateInvalidatedSlots();
    1404             :     }
    1405       36573 :     int old_size = (*invalidated_slots())[object];
    1406       73146 :     (*invalidated_slots())[object] = std::max(old_size, size);
    1407             :   }
    1408       46958 : }
    1409             : 
    1410           0 : bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
    1411           0 :   if (ShouldSkipEvacuationSlotRecording()) {
    1412             :     // Invalidated slots do not matter if we are not recording slots.
    1413             :     return true;
    1414             :   }
    1415           0 :   if (invalidated_slots() == nullptr) {
    1416             :     return false;
    1417             :   }
    1418             :   return invalidated_slots()->find(object) != invalidated_slots()->end();
    1419             : }
    1420             : 
    1421           5 : void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
    1422           5 :                                                  HeapObject new_start) {
    1423             :   DCHECK_LT(old_start, new_start);
    1424             :   DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
    1425             :             MemoryChunk::FromHeapObject(new_start));
    1426          10 :   if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
    1427             :     auto it = invalidated_slots()->find(old_start);
    1428           0 :     if (it != invalidated_slots()->end()) {
    1429           0 :       int old_size = it->second;
    1430           0 :       int delta = static_cast<int>(new_start->address() - old_start->address());
    1431             :       invalidated_slots()->erase(it);
    1432           0 :       (*invalidated_slots())[new_start] = old_size - delta;
    1433             :     }
    1434             :   }
    1435           5 : }
    1436             : 
    1437      243001 : void MemoryChunk::ReleaseLocalTracker() {
    1438             :   DCHECK_NOT_NULL(local_tracker_);
    1439      243001 :   delete local_tracker_;
    1440      243011 :   local_tracker_ = nullptr;
    1441      243011 : }
    1442             : 
    1443           0 : void MemoryChunk::AllocateYoungGenerationBitmap() {
    1444             :   DCHECK_NULL(young_generation_bitmap_);
    1445           0 :   young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1446           0 : }
    1447             : 
    1448           0 : void MemoryChunk::ReleaseYoungGenerationBitmap() {
    1449             :   DCHECK_NOT_NULL(young_generation_bitmap_);
    1450           0 :   free(young_generation_bitmap_);
    1451           0 :   young_generation_bitmap_ = nullptr;
    1452           0 : }
    1453             : 
    1454           0 : void MemoryChunk::AllocateMarkingBitmap() {
    1455             :   DCHECK_NULL(marking_bitmap_);
    1456      728989 :   marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1457           0 : }
    1458             : 
    1459           0 : void MemoryChunk::ReleaseMarkingBitmap() {
    1460             :   DCHECK_NOT_NULL(marking_bitmap_);
    1461      728898 :   free(marking_bitmap_);
    1462      728898 :   marking_bitmap_ = nullptr;
    1463           0 : }
    1464             : 
    1465             : // -----------------------------------------------------------------------------
    1466             : // PagedSpace implementation
    1467             : 
    1468      318476 : void Space::AddAllocationObserver(AllocationObserver* observer) {
    1469      318476 :   allocation_observers_.push_back(observer);
    1470      318476 :   StartNextInlineAllocationStep();
    1471      318476 : }
    1472             : 
    1473      312133 : void Space::RemoveAllocationObserver(AllocationObserver* observer) {
    1474             :   auto it = std::find(allocation_observers_.begin(),
    1475      312132 :                       allocation_observers_.end(), observer);
    1476             :   DCHECK(allocation_observers_.end() != it);
    1477      312132 :   allocation_observers_.erase(it);
    1478      312132 :   StartNextInlineAllocationStep();
    1479      312132 : }
    1480             : 
    1481      865104 : void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
    1482             : 
    1483      324414 : void Space::ResumeAllocationObservers() {
    1484      865104 :   allocation_observers_paused_ = false;
    1485      324414 : }
    1486             : 
    1487   126152148 : void Space::AllocationStep(int bytes_since_last, Address soon_object,
    1488    67269717 :                            int size) {
    1489   126152148 :   if (!AllocationObserversActive()) {
    1490   126152148 :     return;
    1491             :   }
    1492             : 
    1493             :   DCHECK(!heap()->allocation_step_in_progress());
    1494             :   heap()->set_allocation_step_in_progress(true);
    1495    22423239 :   heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
    1496    67486072 :   for (AllocationObserver* observer : allocation_observers_) {
    1497    22639592 :     observer->AllocationStep(bytes_since_last, soon_object, size);
    1498             :   }
    1499             :   heap()->set_allocation_step_in_progress(false);
    1500             : }
    1501             : 
    1502           0 : intptr_t Space::GetNextInlineAllocationStepSize() {
    1503             :   intptr_t next_step = 0;
    1504    89716453 :   for (AllocationObserver* observer : allocation_observers_) {
    1505             :     next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
    1506    22478106 :                           : observer->bytes_to_next_step();
    1507             :   }
    1508             :   DCHECK(allocation_observers_.size() == 0 || next_step > 0);
    1509           0 :   return next_step;
    1510             : }
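                     : 
                     : // Example: with two active observers that want to step after 512 and 2048
                     : // more bytes respectively, the result is Min(512, 2048) = 512, so the
                     : // limit computed in ComputeLimit below is low enough to observe both.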
    1511             : 
    1512      497901 : PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
    1513             :                        Executability executable)
    1514     1493702 :     : SpaceWithLinearArea(heap, space), executable_(executable) {
    1515      497901 :   area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
    1516             :   accounting_stats_.Clear();
    1517      497901 : }
    1518             : 
    1519      497839 : void PagedSpace::TearDown() {
    1520     1423442 :   while (!memory_chunk_list_.Empty()) {
    1521             :     MemoryChunk* chunk = memory_chunk_list_.front();
    1522      427762 :     memory_chunk_list_.Remove(chunk);
    1523      427764 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
    1524             :   }
    1525             :   accounting_stats_.Clear();
    1526      497841 : }
    1527             : 
    1528      401344 : void PagedSpace::RefillFreeList() {
    1529             :   // Any PagedSpace might invoke RefillFreeList. We filter out all but our
    1530             :   // old generation spaces.
    1531      999067 :   if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
    1532      401344 :       identity() != MAP_SPACE && identity() != RO_SPACE) {
    1533      401440 :     return;
    1534             :   }
    1535     1320575 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    1536             :   size_t added = 0;
    1537             :   {
    1538      524571 :     Page* p = nullptr;
    1539     1320575 :     while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
    1540             :       // Pages can actually change ownership only during compaction. This is
    1541             :       // safe because no other action competes on the page links during
    1542             :       // compaction.
    1543      524571 :       if (is_local()) {
    1544             :         DCHECK_NE(this, p->owner());
    1545             :         PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
    1546       47269 :         base::MutexGuard guard(owner->mutex());
    1547       47269 :         owner->RefineAllocatedBytesAfterSweeping(p);
    1548       47269 :         owner->RemovePage(p);
    1549       47269 :         added += AddPage(p);
    1550             :       } else {
    1551      477302 :         base::MutexGuard guard(mutex());
    1552             :         DCHECK_EQ(this, p->owner());
    1553      477302 :         RefineAllocatedBytesAfterSweeping(p);
    1554      477302 :         added += RelinkFreeListCategories(p);
    1555             :       }
    1556      524571 :       added += p->wasted_memory();
    1557      524571 :       if (is_local() && (added > kCompactionMemoryWanted)) break;
    1558             :     }
    1559             :   }
    1560             : }
    1561             : 
    1562      246359 : void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
    1563      246359 :   base::MutexGuard guard(mutex());
    1564             : 
    1565             :   DCHECK(identity() == other->identity());
    1566             :   // Unmerged fields:
    1567             :   //   area_size_
    1568      246359 :   other->FreeLinearAllocationArea();
    1569             : 
    1570             :   // The linear allocation area of {other} should be destroyed now.
    1571             :   DCHECK_EQ(kNullAddress, other->top());
    1572             :   DCHECK_EQ(kNullAddress, other->limit());
    1573             : 
    1574             :   // Move over pages.
    1575      341316 :   for (auto it = other->begin(); it != other->end();) {
    1576             :     Page* p = *(it++);
    1577             :     // Relinking requires the category to be unlinked.
    1578       94957 :     other->RemovePage(p);
    1579       94957 :     AddPage(p);
    1580             :     DCHECK_EQ(p->AvailableInFreeList(),
    1581             :               p->AvailableInFreeListFromAllocatedBytes());
    1582             :   }
    1583             :   DCHECK_EQ(0u, other->Size());
    1584             :   DCHECK_EQ(0u, other->Capacity());
    1585      246359 : }
    1586             : 
    1587             : 
    1588        1004 : size_t PagedSpace::CommittedPhysicalMemory() {
    1589        1004 :   if (!base::OS::HasLazyCommits()) return CommittedMemory();
    1590        1004 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1591             :   size_t size = 0;
    1592        2695 :   for (Page* page : *this) {
    1593        1691 :     size += page->CommittedPhysicalMemory();
    1594             :   }
    1595             :   return size;
    1596             : }
    1597             : 
    1598          20 : bool PagedSpace::ContainsSlow(Address addr) {
    1599             :   Page* p = Page::FromAddress(addr);
    1600         285 :   for (Page* page : *this) {
    1601         280 :     if (page == p) return true;
    1602             :   }
    1603             :   return false;
    1604             : }
    1605             : 
    1606     1049142 : void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
    1607      524571 :   CHECK(page->SweepingDone());
    1608             :   auto marking_state =
    1609       12438 :       heap()->incremental_marking()->non_atomic_marking_state();
    1610             :   // The live_bytes count on the page was accounted in the space allocated
    1611             :   // bytes counter. After sweeping, allocated_bytes() contains the
    1612             :   // accurate live byte count on the page.
    1613             :   size_t old_counter = marking_state->live_bytes(page);
    1614             :   size_t new_counter = page->allocated_bytes();
    1615             :   DCHECK_GE(old_counter, new_counter);
    1616      524571 :   if (old_counter > new_counter) {
    1617       12438 :     DecreaseAllocatedBytes(old_counter - new_counter, page);
    1618             :     // Give the heap a chance to adjust counters in response to the
    1619             :     // more precise and smaller old generation size.
    1620             :     heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
    1621             :   }
    1622             :   marking_state->SetLiveBytes(page, 0);
    1623      524571 : }
    1624             : 
    1625       24969 : Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
    1626       24969 :   base::MutexGuard guard(mutex());
    1627             :   // Check for pages that still contain free list entries. Bail out for smaller
    1628             :   // categories.
    1629             :   const int minimum_category =
    1630       49994 :       static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
    1631             :   Page* page = free_list()->GetPageForCategoryType(kHuge);
    1632       24997 :   if (!page && static_cast<int>(kLarge) >= minimum_category)
    1633             :     page = free_list()->GetPageForCategoryType(kLarge);
    1634       24997 :   if (!page && static_cast<int>(kMedium) >= minimum_category)
    1635             :     page = free_list()->GetPageForCategoryType(kMedium);
    1636       24997 :   if (!page && static_cast<int>(kSmall) >= minimum_category)
    1637             :     page = free_list()->GetPageForCategoryType(kSmall);
    1638       24997 :   if (!page && static_cast<int>(kTiny) >= minimum_category)
    1639             :     page = free_list()->GetPageForCategoryType(kTiny);
    1640       24997 :   if (!page && static_cast<int>(kTiniest) >= minimum_category)
    1641             :     page = free_list()->GetPageForCategoryType(kTiniest);
    1642       24997 :   if (!page) return nullptr;
    1643       18591 :   RemovePage(page);
    1644       18591 :   return page;
    1645             : }
    1646             : 
    1647     1209567 : size_t PagedSpace::AddPage(Page* page) {
    1648     1209564 :   CHECK(page->SweepingDone());
    1649      604782 :   page->set_owner(this);
    1650             :   memory_chunk_list_.PushBack(page);
    1651             :   AccountCommitted(page->size());
    1652             :   IncreaseCapacity(page->area_size());
    1653             :   IncreaseAllocatedBytes(page->allocated_bytes(), page);
    1654     1814348 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    1655     1209565 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    1656     1209565 :     IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    1657             :   }
    1658      604783 :   return RelinkFreeListCategories(page);
    1659             : }
    1660             : 
    1661      321634 : void PagedSpace::RemovePage(Page* page) {
    1662      321634 :   CHECK(page->SweepingDone());
    1663      160817 :   memory_chunk_list_.Remove(page);
    1664             :   UnlinkFreeListCategories(page);
    1665             :   DecreaseAllocatedBytes(page->allocated_bytes(), page);
    1666             :   DecreaseCapacity(page->area_size());
    1667             :   AccountUncommitted(page->size());
    1668      482451 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    1669      321634 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    1670      321634 :     DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    1671             :   }
    1672      160817 : }
    1673             : 
    1674      188346 : size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
    1675      188346 :   size_t unused = page->ShrinkToHighWaterMark();
    1676             :   accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    1677             :   AccountUncommitted(unused);
    1678      188346 :   return unused;
    1679             : }
    1680             : 
    1681         400 : void PagedSpace::ResetFreeList() {
    1682      379133 :   for (Page* page : *this) {
    1683      190417 :     free_list_.EvictFreeListItems(page);
    1684             :   }
    1685             :   DCHECK(free_list_.IsEmpty());
    1686         400 : }
    1687             : 
    1688      188316 : void PagedSpace::ShrinkImmortalImmovablePages() {
    1689             :   DCHECK(!heap()->deserialization_complete());
    1690      188316 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1691      188316 :   FreeLinearAllocationArea();
    1692             :   ResetFreeList();
    1693      376642 :   for (Page* page : *this) {
    1694             :     DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    1695      188326 :     ShrinkPageToHighWaterMark(page);
    1696             :   }
    1697      188316 : }
    1698             : 
    1699     1330269 : bool PagedSpace::Expand() {
    1700             :   // Always lock against the main space as we can only adjust capacity and
    1701             :   // pages concurrently for the main paged space.
    1702     2217092 :   base::MutexGuard guard(heap()->paged_space(identity())->mutex());
    1703             : 
    1704             :   const int size = AreaSize();
    1705             : 
    1706      886896 :   if (!heap()->CanExpandOldGeneration(size)) return false;
    1707             : 
    1708             :   Page* page =
    1709      443382 :       heap()->memory_allocator()->AllocatePage(size, this, executable());
    1710      443384 :   if (page == nullptr) return false;
    1711             :   // Pages created during bootstrapping may contain immortal immovable objects.
    1712      443384 :   if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
    1713      443384 :   AddPage(page);
    1714             :   Free(page->area_start(), page->area_size(),
    1715      443383 :        SpaceAccountingMode::kSpaceAccounted);
    1716      443384 :   return true;
    1717             : }
    1718             : 
    1719             : 
    1720      168109 : int PagedSpace::CountTotalPages() {
    1721             :   int count = 0;
    1722      516962 :   for (Page* page : *this) {
    1723      348853 :     count++;
    1724             :     USE(page);
    1725             :   }
    1726      168109 :   return count;
    1727             : }
    1728             : 
    1729             : 
    1730      250476 : void PagedSpace::ResetFreeListStatistics() {
    1731      790802 :   for (Page* page : *this) {
    1732             :     page->ResetFreeListStatistics();
    1733             :   }
    1734      250476 : }
    1735             : 
    1736     1332018 : void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
    1737             :   SetTopAndLimit(top, limit);
    1738     2664036 :   if (top != kNullAddress && top != limit &&
    1739     1332018 :       heap()->incremental_marking()->black_allocation()) {
    1740      156965 :     Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
    1741             :   }
    1742     1332018 : }
    1743             : 
    1744    22148118 : void PagedSpace::DecreaseLimit(Address new_limit) {
    1745             :   Address old_limit = limit();
    1746             :   DCHECK_LE(top(), new_limit);
    1747             :   DCHECK_GE(old_limit, new_limit);
    1748    22148118 :   if (new_limit != old_limit) {
    1749             :     SetTopAndLimit(top(), new_limit);
    1750             :     Free(new_limit, old_limit - new_limit,
    1751       38536 :          SpaceAccountingMode::kSpaceAccounted);
    1752       38536 :     if (heap()->incremental_marking()->black_allocation()) {
    1753             :       Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
    1754       11735 :                                                                    old_limit);
    1755             :     }
    1756             :   }
    1757    22148118 : }
    1758             : 
    1759    24161560 : Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
    1760             :                                           size_t min_size) {
    1761             :   DCHECK_GE(end - start, min_size);
    1762             : 
    1763    46443695 :   if (heap()->inline_allocation_disabled()) {
    1764             :     // Fit the requested area exactly.
    1765      310531 :     return start + min_size;
    1766    46666552 :   } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
    1767             :     // Generated code may allocate inline from the linear allocation area.
    1768             :     // To make sure we can observe these allocations, we use a lower limit.
    1769    22282135 :     size_t step = GetNextInlineAllocationStepSize();
    1770             : 
    1771             :     // TODO(ofrobots): there is a subtle difference between old space and new
    1772             :     // space here. Any way to avoid it? `step - 1` makes more sense as we would
    1773             :     // like to sample the object that straddles the `start + step` boundary.
    1774             :     // Rounding down further would introduce a small statistical error in
    1775             :     // sampling. However, presently PagedSpace requires limit to be aligned.
    1776             :     size_t rounded_step;
    1777    22282135 :     if (identity() == NEW_SPACE) {
    1778             :       DCHECK_GE(step, 1);
    1779      438543 :       rounded_step = step - 1;
    1780             :     } else {
    1781    21843592 :       rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
    1782             :     }
    1783    44564266 :     return Min(static_cast<Address>(start + min_size + rounded_step), end);
    1784             :   } else {
    1785             :     // The entire node can be used as the linear allocation area.
    1786             :     return end;
    1787             :   }
    1788             : }
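                     : 
                     : // Worked example (assumed numbers): start = 0x1000, end = 0x5000,
                     : // min_size = 0x100, and an observer step rounded to 0x800. With inline
                     : // allocation enabled and observers active, the limit becomes
                     : // Min(0x1000 + 0x100 + 0x800, 0x5000) = 0x1900, so generated code can
                     : // bump-allocate about 2.25 KB at most before the runtime regains control.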
    1789             : 
    1790       84732 : void PagedSpace::MarkLinearAllocationAreaBlack() {
    1791             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1792             :   Address current_top = top();
    1793             :   Address current_limit = limit();
    1794       84732 :   if (current_top != kNullAddress && current_top != current_limit) {
    1795             :     Page::FromAllocationAreaAddress(current_top)
    1796       27076 :         ->CreateBlackArea(current_top, current_limit);
    1797             :   }
    1798       84732 : }
    1799             : 
    1800        1908 : void PagedSpace::UnmarkLinearAllocationArea() {
    1801             :   Address current_top = top();
    1802             :   Address current_limit = limit();
    1803        1908 :   if (current_top != kNullAddress && current_top != current_limit) {
    1804             :     Page::FromAllocationAreaAddress(current_top)
    1805        1041 :         ->DestroyBlackArea(current_top, current_limit);
    1806             :   }
    1807        1908 : }
    1808             : 
    1809     2711456 : void PagedSpace::FreeLinearAllocationArea() {
    1810             :   // Mark the old linear allocation area with a free space map so it can be
    1811             :   // skipped when scanning the heap.
    1812             :   Address current_top = top();
    1813             :   Address current_limit = limit();
    1814     2711456 :   if (current_top == kNullAddress) {
    1815             :     DCHECK_EQ(kNullAddress, current_limit);
    1816     2711452 :     return;
    1817             :   }
    1818             : 
    1819     2529491 :   if (heap()->incremental_marking()->black_allocation()) {
    1820      128627 :     Page* page = Page::FromAllocationAreaAddress(current_top);
    1821             : 
    1822             :     // Clear the bits in the unused black area.
    1823      182359 :     if (current_top != current_limit) {
    1824             :       IncrementalMarking::MarkingState* marking_state =
    1825             :           heap()->incremental_marking()->marking_state();
    1826             :       marking_state->bitmap(page)->ClearRange(
    1827             :           page->AddressToMarkbitIndex(current_top),
    1828      257254 :           page->AddressToMarkbitIndex(current_limit));
    1829             :       marking_state->IncrementLiveBytes(
    1830      128627 :           page, -static_cast<int>(current_limit - current_top));
    1831             :     }
    1832             :   }
    1833             : 
    1834     1216151 :   InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
    1835             :   SetTopAndLimit(kNullAddress, kNullAddress);
    1836             :   DCHECK_GE(current_limit, current_top);
    1837             : 
    1838             :   // The code page of the linear allocation area needs to be unprotected
    1839             :   // because we are going to write a filler into that memory area below.
    1840     1216147 :   if (identity() == CODE_SPACE) {
    1841             :     heap()->UnprotectAndRegisterMemoryChunk(
    1842       97193 :         MemoryChunk::FromAddress(current_top));
    1843             :   }
    1844             :   Free(current_top, current_limit - current_top,
    1845     1216147 :        SpaceAccountingMode::kSpaceAccounted);
    1846             : }
    1847             : 
    1848       16152 : void PagedSpace::ReleasePage(Page* page) {
    1849             :   DCHECK_EQ(
    1850             :       0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
    1851             :              page));
    1852             :   DCHECK_EQ(page->owner(), this);
    1853             : 
    1854       16152 :   free_list_.EvictFreeListItems(page);
    1855             :   DCHECK(!free_list_.ContainsPageFreeListItems(page));
    1856             : 
    1857       32304 :   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    1858             :     DCHECK(!top_on_previous_step_);
    1859             :     allocation_info_.Reset(kNullAddress, kNullAddress);
    1860             :   }
    1861             : 
    1862       32304 :   AccountUncommitted(page->size());
    1863             :   accounting_stats_.DecreaseCapacity(page->area_size());
    1864       16152 :   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    1865       16152 : }
    1866             : 
    1867           0 : void PagedSpace::SetReadable() {
    1868             :   DCHECK(identity() == CODE_SPACE);
    1869           0 :   for (Page* page : *this) {
    1870           0 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1871           0 :     page->SetReadable();
    1872             :   }
    1873           0 : }
    1874             : 
    1875      465720 : void PagedSpace::SetReadAndExecutable() {
    1876             :   DCHECK(identity() == CODE_SPACE);
    1877     1254508 :   for (Page* page : *this) {
    1878     1577569 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1879      788787 :     page->SetReadAndExecutable();
    1880             :   }
    1881      465726 : }
    1882             : 
    1883      465722 : void PagedSpace::SetReadAndWritable() {
    1884             :   DCHECK(identity() == CODE_SPACE);
    1885     1191261 :   for (Page* page : *this) {
    1886     1451073 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1887      725538 :     page->SetReadAndWritable();
    1888             :   }
    1889      465726 : }
    1890             : 
    1891       31412 : std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
    1892       31412 :   return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
    1893             : }
    1894             : 
    1895     1932189 : bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
    1896             :   DCHECK(IsAligned(size_in_bytes, kTaggedSize));
    1897             :   DCHECK_LE(top(), limit());
    1898             : #ifdef DEBUG
    1899             :   if (top() != limit()) {
    1900             :     DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
    1901             :   }
    1902             : #endif
    1903             :   // Don't free list allocate if there is linear space available.
    1904             :   DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
    1905             : 
    1906             :   // Mark the old linear allocation area with a free space map so it can be
    1907             :   // skipped when scanning the heap.  This also puts it back in the free list
    1908             :   // if it is big enough.
    1909     1932189 :   FreeLinearAllocationArea();
    1910             : 
    1911     1932159 :   if (!is_local()) {
    1912             :     heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    1913             :         heap()->GCFlagsForIncrementalMarking(),
    1914     3477823 :         kGCCallbackScheduleIdleGarbageCollection);
    1915             :   }
    1916             : 
    1917     1932141 :   size_t new_node_size = 0;
    1918     1932141 :   FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
    1919     1932238 :   if (new_node.is_null()) return false;
    1920             : 
    1921             :   DCHECK_GE(new_node_size, size_in_bytes);
    1922             : 
    1923             :   // The old-space-step might have finished sweeping and restarted marking.
    1924             :   // Verify that it did not turn the page of the new node into an evacuation
    1925             :   // candidate.
    1926             :   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
    1927             : 
    1928             :   // Memory in the linear allocation area is counted as allocated.  We may free
    1929             :   // a little of this again immediately - see below.
    1930             :   Page* page = Page::FromHeapObject(new_node);
    1931     1332024 :   IncreaseAllocatedBytes(new_node_size, page);
    1932             : 
    1933             :   Address start = new_node->address();
    1934     1332024 :   Address end = new_node->address() + new_node_size;
    1935     1332024 :   Address limit = ComputeLimit(start, end, size_in_bytes);
    1936             :   DCHECK_LE(limit, end);
    1937             :   DCHECK_LE(size_in_bytes, limit - start);
    1938     1332023 :   if (limit != end) {
    1939      238692 :     if (identity() == CODE_SPACE) {
    1940        2164 :       heap()->UnprotectAndRegisterMemoryChunk(page);
    1941             :     }
    1942      238692 :     Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
    1943             :   }
    1944     1332019 :   SetLinearAllocationArea(start, limit);
    1945             : 
    1946     1332020 :   return true;
    1947             : }
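                     : // Editorial sketch: the refill above takes a free-list node of at least
                     : // size_in_bytes, lays the linear allocation area over its prefix
                     : // [start, limit), and returns the tail [limit, end) to the free list. In
                     : // isolation (hypothetical helper, not V8 API):
                     : //
                     : //   #include <cassert>
                     : //   #include <cstdint>
                     : //   // Bytes handed back to the free list when node [start, end) is capped
                     : //   // at |limit|; zero when the whole node becomes the allocation area.
                     : //   inline uintptr_t TailToFree(uintptr_t start, uintptr_t end,
                     : //                               uintptr_t limit) {
                     : //     assert(start <= limit && limit <= end);
                     : //     return end - limit;
                     : //   }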
    1948             : 
    1949             : #ifdef DEBUG
    1950             : void PagedSpace::Print() {}
    1951             : #endif
    1952             : 
    1953             : #ifdef VERIFY_HEAP
    1954             : void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
    1955             :   bool allocation_pointer_found_in_space =
    1956             :       (allocation_info_.top() == allocation_info_.limit());
    1957             :   size_t external_space_bytes[kNumTypes];
    1958             :   size_t external_page_bytes[kNumTypes];
    1959             : 
    1960             :   for (int i = 0; i < kNumTypes; i++) {
    1961             :     external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    1962             :   }
    1963             : 
    1964             :   for (Page* page : *this) {
    1965             :     CHECK(page->owner() == this);
    1966             : 
    1967             :     for (int i = 0; i < kNumTypes; i++) {
    1968             :       external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    1969             :     }
    1970             : 
    1971             :     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
    1972             :       allocation_pointer_found_in_space = true;
    1973             :     }
    1974             :     CHECK(page->SweepingDone());
    1975             :     HeapObjectIterator it(page);
    1976             :     Address end_of_previous_object = page->area_start();
    1977             :     Address top = page->area_end();
    1978             : 
    1979             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    1980             :       CHECK(end_of_previous_object <= object->address());
    1981             : 
    1982             :       // The first word should be a map, and we expect all map pointers to
    1983             :       // be in map space or read-only space.
    1984             :       Map map = object->map();
    1985             :       CHECK(map->IsMap());
    1986             :       CHECK(heap()->map_space()->Contains(map) ||
    1987             :             heap()->read_only_space()->Contains(map));
    1988             : 
    1989             :       // Perform space-specific object verification.
    1990             :       VerifyObject(object);
    1991             : 
    1992             :       // The object itself should look OK.
    1993             :       object->ObjectVerify(isolate);
    1994             : 
    1995             :       if (!FLAG_verify_heap_skip_remembered_set) {
    1996             :         heap()->VerifyRememberedSetFor(object);
    1997             :       }
    1998             : 
    1999             :       // All the interior pointers should be contained in the heap.
    2000             :       int size = object->Size();
    2001             :       object->IterateBody(map, size, visitor);
    2002             :       CHECK(object->address() + size <= top);
    2003             :       end_of_previous_object = object->address() + size;
    2004             : 
    2005             :       if (object->IsExternalString()) {
    2006             :         ExternalString external_string = ExternalString::cast(object);
    2007             :         size_t size = external_string->ExternalPayloadSize();
    2008             :         external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
    2009             :       } else if (object->IsJSArrayBuffer()) {
    2010             :         JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
    2011             :         if (ArrayBufferTracker::IsTracked(array_buffer)) {
    2012             :           size_t size = array_buffer->byte_length();
    2013             :           external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
    2014             :         }
    2015             :       }
    2016             :     }
    2017             :     for (int i = 0; i < kNumTypes; i++) {
    2018             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2019             :       CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
    2020             :       external_space_bytes[t] += external_page_bytes[t];
    2021             :     }
    2022             :   }
    2023             :   for (int i = 0; i < kNumTypes; i++) {
    2024             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2025             :     CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
    2026             :   }
    2027             :   CHECK(allocation_pointer_found_in_space);
    2028             : #ifdef DEBUG
    2029             :   VerifyCountersAfterSweeping();
    2030             : #endif
    2031             : }
    2032             : 
    2033             : void PagedSpace::VerifyLiveBytes() {
    2034             :   IncrementalMarking::MarkingState* marking_state =
    2035             :       heap()->incremental_marking()->marking_state();
    2036             :   for (Page* page : *this) {
    2037             :     CHECK(page->SweepingDone());
    2038             :     HeapObjectIterator it(page);
    2039             :     int black_size = 0;
    2040             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    2041             :       // Sum up the sizes of all black (marked) objects on the page.
    2042             :       if (marking_state->IsBlack(object)) {
    2043             :         black_size += object->Size();
    2044             :       }
    2045             :     }
    2046             :     CHECK_LE(black_size, marking_state->live_bytes(page));
    2047             :   }
    2048             : }
    2049             : #endif  // VERIFY_HEAP
    2050             : 
    2051             : #ifdef DEBUG
    2052             : void PagedSpace::VerifyCountersAfterSweeping() {
    2053             :   size_t total_capacity = 0;
    2054             :   size_t total_allocated = 0;
    2055             :   for (Page* page : *this) {
    2056             :     DCHECK(page->SweepingDone());
    2057             :     total_capacity += page->area_size();
    2058             :     HeapObjectIterator it(page);
    2059             :     size_t real_allocated = 0;
    2060             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    2061             :       if (!object->IsFiller()) {
    2062             :         real_allocated += object->Size();
    2063             :       }
    2064             :     }
    2065             :     total_allocated += page->allocated_bytes();
    2066             :     // The real size can be smaller than the accounted size if array trimming
    2067             :     // or object slack tracking happened after sweeping.
    2068             :     DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    2069             :     DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
    2070             :   }
    2071             :   DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
    2072             :   DCHECK_EQ(total_allocated, accounting_stats_.Size());
    2073             : }
    2074             : 
    2075             : void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
    2076             :   // We need to refine the counters on pages that are already swept and have
    2077             :   // not been moved over to the actual space. Otherwise, the AccountingStats
    2078             :   // are just an over-approximation.
    2079             :   RefillFreeList();
    2080             : 
    2081             :   size_t total_capacity = 0;
    2082             :   size_t total_allocated = 0;
    2083             :   auto marking_state =
    2084             :       heap()->incremental_marking()->non_atomic_marking_state();
    2085             :   for (Page* page : *this) {
    2086             :     size_t page_allocated =
    2087             :         page->SweepingDone()
    2088             :             ? page->allocated_bytes()
    2089             :             : static_cast<size_t>(marking_state->live_bytes(page));
    2090             :     total_capacity += page->area_size();
    2091             :     total_allocated += page_allocated;
    2092             :     DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
    2093             :   }
    2094             :   DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
    2095             :   DCHECK_EQ(total_allocated, accounting_stats_.Size());
    2096             : }
    2097             : #endif
    2098             : 
    2099             : // -----------------------------------------------------------------------------
    2100             : // NewSpace implementation
    2101             : 
    2102       62888 : NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
    2103             :                    size_t initial_semispace_capacity,
    2104             :                    size_t max_semispace_capacity)
    2105             :     : SpaceWithLinearArea(heap, NEW_SPACE),
    2106             :       to_space_(heap, kToSpace),
    2107      125776 :       from_space_(heap, kFromSpace) {
    2108             :   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
    2109             :   DCHECK(
    2110             :       base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
    2111             : 
    2112             :   to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
    2113             :   from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
    2114       62888 :   if (!to_space_.Commit()) {
    2115           0 :     V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
    2116             :   }
    2117             :   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
    2118       62888 :   ResetLinearAllocationArea();
    2119       62888 : }
    2120             : 
    2121       62878 : void NewSpace::TearDown() {
    2122             :   allocation_info_.Reset(kNullAddress, kNullAddress);
    2123             : 
    2124       62878 :   to_space_.TearDown();
    2125       62878 :   from_space_.TearDown();
    2126       62878 : }
    2127             : 
    2128      107086 : void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
    2129             : 
    2130             : 
    2131        1948 : void NewSpace::Grow() {
    2132             :   // Double the semispace size but only up to maximum capacity.
    2133             :   DCHECK(TotalCapacity() < MaximumCapacity());
    2134             :   size_t new_capacity =
    2135             :       Min(MaximumCapacity(),
    2136        3896 :           static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
    2137        1948 :   if (to_space_.GrowTo(new_capacity)) {
    2138             :     // Only grow from space if we managed to grow to-space.
    2139        1948 :     if (!from_space_.GrowTo(new_capacity)) {
    2140             :       // If we managed to grow to-space but couldn't grow from-space,
    2141             :       // attempt to shrink to-space.
    2142           0 :       if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
    2143             :         // We are in an inconsistent state because we could not
    2144             :         // commit/uncommit memory from new space.
    2145           0 :         FATAL("inconsistent state");
    2146             :       }
    2147             :     }
    2148             :   }
    2149             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2150        1948 : }
    2151             : 
    2152             : 
    2153       26747 : void NewSpace::Shrink() {
    2154       26747 :   size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
    2155             :   size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
    2156       26853 :   if (rounded_new_capacity < TotalCapacity() &&
    2157         106 :       to_space_.ShrinkTo(rounded_new_capacity)) {
    2158             :     // Only shrink from-space if we managed to shrink to-space.
    2159           0 :     from_space_.Reset();
    2160         106 :     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
    2161             :     // If we managed to shrink to-space but couldn't shrink from-space,
    2162             :     // attempt to grow to-space again.
    2163           0 :       if (!to_space_.GrowTo(from_space_.current_capacity())) {
    2164             :         // We are in an inconsistent state because we could not
    2165             :         // commit/uncommit memory from new space.
    2166           0 :         FATAL("inconsistent state");
    2167             :       }
    2168             :     }
    2169             :   }
    2170             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2171       26747 : }
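                     : // Editorial note: Shrink() therefore targets
                     : // RoundUp(max(InitialTotalCapacity(), 2 * Size()), Page::kPageSize). A
                     : // worked example with illustrative numbers (not V8 defaults): with a
                     : // 512 KB initial capacity, a 512 KB page, and 300 KB of live objects,
                     : // the target is RoundUp(600 KB) = 1 MB, so a 2 MB semispace would shrink
                     : // while a 1 MB one would stay as is.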
    2172             : 
    2173       83492 : bool NewSpace::Rebalance() {
    2174             :   // Order here is important to make use of the page pool.
    2175      166984 :   return to_space_.EnsureCurrentCapacity() &&
    2176      166984 :          from_space_.EnsureCurrentCapacity();
    2177             : }
    2178             : 
    2179      166984 : bool SemiSpace::EnsureCurrentCapacity() {
    2180      166984 :   if (is_committed()) {
    2181             :     const int expected_pages =
    2182      166984 :         static_cast<int>(current_capacity_ / Page::kPageSize);
    2183             :     MemoryChunk* current_page = first_page();
    2184             :     int actual_pages = 0;
    2185             : 
    2186             :     // First, walk the page list counting up to expected_pages, if that many
    2187             :     // pages exist.
    2188      759783 :     while (current_page != nullptr && actual_pages < expected_pages) {
    2189      425815 :       actual_pages++;
    2190      425815 :       current_page = current_page->list_node().next();
    2191             :     }
    2192             : 
    2193             :     // Free all overallocated pages which are behind current_page.
    2194      168449 :     while (current_page) {
    2195        1465 :       MemoryChunk* next_current = current_page->list_node().next();
    2196        1465 :       memory_chunk_list_.Remove(current_page);
    2197             :       // Clear new space flags to avoid this page being treated as a new
    2198             :       // space page that is potentially being swept.
    2199             :       current_page->SetFlags(0, Page::kIsInNewSpaceMask);
    2200             :       heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
    2201        5559 :           current_page);
    2202             :       current_page = next_current;
    2203             :     }
    2204             : 
    2205             :     // Add more pages if we have less than expected_pages.
    2206             :     IncrementalMarking::NonAtomicMarkingState* marking_state =
    2207             :         heap()->incremental_marking()->non_atomic_marking_state();
    2208      169031 :     while (actual_pages < expected_pages) {
    2209        2047 :       actual_pages++;
    2210             :       current_page =
    2211             :           heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2212             :               MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2213        2047 :               NOT_EXECUTABLE);
    2214        2047 :       if (current_page == nullptr) return false;
    2215             :       DCHECK_NOT_NULL(current_page);
    2216             :       memory_chunk_list_.PushBack(current_page);
    2217        2047 :       marking_state->ClearLiveness(current_page);
    2218             :       current_page->SetFlags(first_page()->GetFlags(),
    2219        2047 :                              static_cast<uintptr_t>(Page::kCopyAllFlags));
    2220             :       heap()->CreateFillerObjectAt(current_page->area_start(),
    2221             :                                    static_cast<int>(current_page->area_size()),
    2222        4094 :                                    ClearRecordedSlots::kNo);
    2223             :     }
    2224             :   }
    2225             :   return true;
    2226             : }
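                     : // Editorial sketch: EnsureCurrentCapacity() is a list-resizing pattern --
                     : // walk up to the expected page count, free the surplus, then append until
                     : // the count is met, failing if a page cannot be allocated. Over a plain
                     : // std::list it would read (hypothetical, not V8 API):
                     : //
                     : //   #include <cstddef>
                     : //   #include <list>
                     : //   #include <optional>
                     : //   template <typename Page>
                     : //   bool ResizeTo(std::list<Page>& pages, std::size_t expected,
                     : //                 std::optional<Page> (*make)()) {
                     : //     while (pages.size() > expected) pages.pop_back();  // free surplus
                     : //     while (pages.size() < expected) {                  // add missing
                     : //       std::optional<Page> p = make();
                     : //       if (!p) return false;  // mirrors the nullptr check above
                     : //       pages.push_back(*p);
                     : //     }
                     : //     return true;
                     : //   }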
    2227             : 
    2228     1145492 : LinearAllocationArea LocalAllocationBuffer::Close() {
    2229     1145492 :   if (IsValid()) {
    2230             :     heap_->CreateFillerObjectAt(
    2231             :         allocation_info_.top(),
    2232      103836 :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    2233      103836 :         ClearRecordedSlots::kNo);
    2234      103837 :     const LinearAllocationArea old_info = allocation_info_;
    2235      103837 :     allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    2236      103837 :     return old_info;
    2237             :   }
    2238     1041656 :   return LinearAllocationArea(kNullAddress, kNullAddress);
    2239             : }
    2240             : 
    2241      412116 : LocalAllocationBuffer::LocalAllocationBuffer(
    2242             :     Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
    2243             :     : heap_(heap),
    2244      412116 :       allocation_info_(allocation_info) {
    2245      412116 :   if (IsValid()) {
    2246             :     heap_->CreateFillerObjectAt(
    2247             :         allocation_info_.top(),
    2248      203122 :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    2249      203122 :         ClearRecordedSlots::kNo);
    2250             :   }
    2251      412110 : }
    2252             : 
    2253      203319 : LocalAllocationBuffer::LocalAllocationBuffer(const LocalAllocationBuffer& other)
    2254             :     V8_NOEXCEPT {
    2255             :   *this = other;
    2256      203318 : }
    2257             : 
    2258      203590 : LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    2259             :     const LocalAllocationBuffer& other) V8_NOEXCEPT {
    2260      406909 :   Close();
    2261      406908 :   heap_ = other.heap_;
    2262      406908 :   allocation_info_ = other.allocation_info_;
    2263             : 
    2264             :   // This is needed since we (a) cannot yet use move semantics, (b) want to
    2265             :   // make the class easy to use by passing it by value, and (c) implicitly
    2266             :   // call {Close} upon copy.
    2267             :   const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
    2268             :       kNullAddress, kNullAddress);
    2269      203590 :   return *this;
    2270             : }
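                     : // Editorial note: copy therefore transfers ownership -- the source buffer
                     : // is reset to an invalid state and only the destination keeps the area.
                     : // Hypothetical usage:
                     : //
                     : //   LocalAllocationBuffer a = ...;  // valid, owns [top, limit)
                     : //   LocalAllocationBuffer b = a;    // b owns the area; a is now invalid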
    2271             : 
    2272      231906 : void NewSpace::UpdateLinearAllocationArea() {
    2273             :   // Make sure there are no unaccounted allocations.
    2274             :   DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
    2275             : 
    2276      463812 :   Address new_top = to_space_.page_low();
    2277      231906 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2278             :   allocation_info_.Reset(new_top, to_space_.page_high());
    2279             :   // The order of the following two stores is important.
    2280             :   // See the corresponding loads in ConcurrentMarking::Run.
    2281             :   original_limit_.store(limit(), std::memory_order_relaxed);
    2282             :   original_top_.store(top(), std::memory_order_release);
    2283      231906 :   StartNextInlineAllocationStep();
    2284             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2285      231905 : }
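                     : // Editorial sketch of the publication order above: storing the limit
                     : // first (relaxed) and then the top with release means a reader that
                     : // acquire-loads the top is guaranteed to also see the matching limit.
                     : // In isolation (hypothetical names):
                     : //
                     : //   #include <atomic>
                     : //   #include <cstdint>
                     : //   std::atomic<uintptr_t> g_limit{0}, g_top{0};
                     : //   void Publish(uintptr_t top, uintptr_t limit) {
                     : //     g_limit.store(limit, std::memory_order_relaxed);
                     : //     g_top.store(top, std::memory_order_release);  // publishes g_limit
                     : //   }
                     : //   uintptr_t ReadTop(uintptr_t* limit_out) {
                     : //     uintptr_t t = g_top.load(std::memory_order_acquire);
                     : //     *limit_out = g_limit.load(std::memory_order_relaxed);
                     : //     return t;
                     : //   }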
    2286             : 
    2287      169974 : void NewSpace::ResetLinearAllocationArea() {
    2288             :   // Do a step to account for memory allocated so far before resetting.
    2289      169974 :   InlineAllocationStep(top(), top(), kNullAddress, 0);
    2290             :   to_space_.Reset();
    2291      169974 :   UpdateLinearAllocationArea();
    2292             :   // Clear all mark-bits in the to-space.
    2293             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2294      427145 :       heap()->incremental_marking()->non_atomic_marking_state();
    2295      597118 :   for (Page* p : to_space_) {
    2296      427144 :     marking_state->ClearLiveness(p);
    2297             :     // Concurrent marking may have local live bytes for this page.
    2298      427145 :     heap()->concurrent_marking()->ClearMemoryChunkData(p);
    2299             :   }
    2300      169974 : }
    2301             : 
    2302      681416 : void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
    2303     1362832 :   Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
    2304             :   allocation_info_.set_limit(new_limit);
    2305             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2306      681416 : }
    2307             : 
    2308    22148122 : void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
    2309    22148122 :   Address new_limit = ComputeLimit(top(), limit(), min_size);
    2310             :   DCHECK_LE(new_limit, limit());
    2311    22148119 :   DecreaseLimit(new_limit);
    2312    22148115 : }
    2313             : 
    2314       82206 : bool NewSpace::AddFreshPage() {
    2315       82206 :   Address top = allocation_info_.top();
    2316             :   DCHECK(!OldSpace::IsAtPageStart(top));
    2317             : 
    2318             :   // Do a step to account for memory allocated on previous page.
    2319       82206 :   InlineAllocationStep(top, top, kNullAddress, 0);
    2320             : 
    2321       82206 :   if (!to_space_.AdvancePage()) {
    2322             :     // No more pages left to advance.
    2323             :     return false;
    2324             :   }
    2325             : 
    2326             :   // Clear remainder of current page.
    2327       61932 :   Address limit = Page::FromAllocationAreaAddress(top)->area_end();
    2328       61932 :   int remaining_in_page = static_cast<int>(limit - top);
    2329       61932 :   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
    2330       61932 :   UpdateLinearAllocationArea();
    2331             : 
    2332       61932 :   return true;
    2333             : }
    2334             : 
    2335             : 
    2336           0 : bool NewSpace::AddFreshPageSynchronized() {
    2337           0 :   base::MutexGuard guard(&mutex_);
    2338           0 :   return AddFreshPage();
    2339             : }
    2340             : 
    2341             : 
    2342      362121 : bool NewSpace::EnsureAllocation(int size_in_bytes,
    2343             :                                 AllocationAlignment alignment) {
    2344      765530 :   Address old_top = allocation_info_.top();
    2345      423626 :   Address high = to_space_.page_high();
    2346      362121 :   int filler_size = Heap::GetFillToAlign(old_top, alignment);
    2347      362121 :   int aligned_size_in_bytes = size_in_bytes + filler_size;
    2348             : 
    2349      362121 :   if (old_top + aligned_size_in_bytes > high) {
    2350             :     // Not enough room in the page, try to allocate a new one.
    2351       81722 :     if (!AddFreshPage()) {
    2352             :       return false;
    2353             :     }
    2354             : 
    2355             :     old_top = allocation_info_.top();
    2356             :     high = to_space_.page_high();
    2357       61505 :     filler_size = Heap::GetFillToAlign(old_top, alignment);
    2358             :   }
    2359             : 
    2360             :   DCHECK(old_top + aligned_size_in_bytes <= high);
    2361             : 
    2362      341904 :   if (allocation_info_.limit() < high) {
    2363             :     // Either the limit has been lowered because linear allocation was
    2364             :     // disabled, because incremental marking wants a chance to do a step, or
    2365             :     // because the idle scavenge job wants a chance to post a task.
    2366             :     // Set the new limit accordingly.
    2367      303793 :     Address new_top = old_top + aligned_size_in_bytes;
    2368      303793 :     Address soon_object = old_top + filler_size;
    2369      303793 :     InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
    2370      303793 :     UpdateInlineAllocationLimit(aligned_size_in_bytes);
    2371             :   }
    2372             :   return true;
    2373             : }
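                     : // Editorial sketch: Heap::GetFillToAlign returns the filler bytes needed
                     : // in front of old_top so the object starts at the requested alignment.
                     : // For a power-of-two alignment the usual arithmetic is (an assumption
                     : // about the helper's shape, not V8's exact code):
                     : //
                     : //   #include <cstdint>
                     : //   inline int FillToAlign(uintptr_t addr, uintptr_t alignment) {
                     : //     return static_cast<int>(-addr & (alignment - 1));
                     : //   }
                     : //
                     : // e.g. FillToAlign(0x1004, 8) == 4, so an 8-byte-aligned object placed
                     : // there is preceded by a 4-byte filler.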
    2374             : 
    2375      107710 : size_t LargeObjectSpace::Available() {
    2376             :   // We return zero here since we cannot take advantage of already allocated
    2377             :   // large object memory.
    2378      107710 :   return 0;
    2379             : }
    2380             : 
    2381   126475537 : void SpaceWithLinearArea::StartNextInlineAllocationStep() {
    2382   126475537 :   if (heap()->allocation_step_in_progress()) {
    2383             :     // If we are mid-way through an existing step, don't start a new one.
    2384   126475532 :     return;
    2385             :   }
    2386             : 
    2387   126475596 :   if (AllocationObserversActive()) {
    2388    21968754 :     top_on_previous_step_ = top();
    2389    21968754 :     UpdateInlineAllocationLimit(0);
    2390             :   } else {
    2391             :     DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2392             :   }
    2393             : }
    2394             : 
    2395      222637 : void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
    2396      222637 :   InlineAllocationStep(top(), top(), kNullAddress, 0);
    2397      222637 :   Space::AddAllocationObserver(observer);
    2398             :   DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
    2399      222637 : }
    2400             : 
    2401      218667 : void SpaceWithLinearArea::RemoveAllocationObserver(
    2402             :     AllocationObserver* observer) {
    2403             :   Address top_for_next_step =
    2404      437334 :       allocation_observers_.size() == 1 ? kNullAddress : top();
    2405      218667 :   InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
    2406      218667 :   Space::RemoveAllocationObserver(observer);
    2407             :   DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
    2408      218666 : }
    2409             : 
    2410      540690 : void SpaceWithLinearArea::PauseAllocationObservers() {
    2411             :   // Do a step to account for memory allocated so far.
    2412      540690 :   InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
    2413             :   Space::PauseAllocationObservers();
    2414             :   DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2415      540690 :   UpdateInlineAllocationLimit(0);
    2416      540690 : }
    2417             : 
    2418      540690 : void SpaceWithLinearArea::ResumeAllocationObservers() {
    2419             :   DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2420             :   Space::ResumeAllocationObservers();
    2421      540690 :   StartNextInlineAllocationStep();
    2422      540690 : }
    2423             : 
    2424     2754119 : void SpaceWithLinearArea::InlineAllocationStep(Address top,
    2425             :                                                Address top_for_next_step,
    2426             :                                                Address soon_object,
    2427             :                                                size_t size) {
    2428     2754119 :   if (heap()->allocation_step_in_progress()) {
    2429             :     // Avoid starting a new step if we are mid-way through an existing one.
    2430     2754119 :     return;
    2431             :   }
    2432             : 
    2433     2754119 :   if (top_on_previous_step_) {
    2434      832919 :     if (top < top_on_previous_step_) {
    2435             :       // Generated code decreased the top pointer to do folded allocations.
    2436             :       DCHECK_NE(top, kNullAddress);
    2437             :       DCHECK_EQ(Page::FromAllocationAreaAddress(top),
    2438             :                 Page::FromAllocationAreaAddress(top_on_previous_step_));
    2439           0 :       top_on_previous_step_ = top;
    2440             :     }
    2441      832919 :     int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
    2442      832919 :     AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
    2443      832919 :     top_on_previous_step_ = top_for_next_step;
    2444             :   }
    2445             : }
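                     : // Editorial note: a worked example of the step above (illustrative
                     : // addresses): if the previous step left top_on_previous_step_ at 0x1000
                     : // and allocation has advanced top to 0x1440, observers are notified of
                     : // 0x440 bytes allocated, and the watermark is then rebased to
                     : // top_for_next_step for the next step.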
    2446             : 
    2447        7853 : std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
    2448        7853 :   return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
    2449             : }
    2450             : 
    2451             : #ifdef VERIFY_HEAP
    2452             : // We do not use the SemiSpaceIterator because verification doesn't assume
    2453             : // that it works (it depends on the invariants we are checking).
    2454             : void NewSpace::Verify(Isolate* isolate) {
    2455             :   // The allocation pointer should be in the space or at the very end.
    2456             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2457             : 
    2458             :   // There should be objects packed in from the low address up to the
    2459             :   // allocation pointer.
    2460             :   Address current = to_space_.first_page()->area_start();
    2461             :   CHECK_EQ(current, to_space_.space_start());
    2462             : 
    2463             :   size_t external_space_bytes[kNumTypes];
    2464             :   for (int i = 0; i < kNumTypes; i++) {
    2465             :     external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    2466             :   }
    2467             : 
    2468             :   while (current != top()) {
    2469             :     if (!Page::IsAlignedToPageSize(current)) {
    2470             :       // The allocation pointer should not be in the middle of an object.
    2471             :       CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
    2472             :             current < top());
    2473             : 
    2474             :       HeapObject object = HeapObject::FromAddress(current);
    2475             : 
    2476             :       // The first word should be a map, and we expect all map pointers to
    2477             :       // be in map space or read-only space.
    2478             :       Map map = object->map();
    2479             :       CHECK(map->IsMap());
    2480             :       CHECK(heap()->map_space()->Contains(map) ||
    2481             :             heap()->read_only_space()->Contains(map));
    2482             : 
    2483             :       // The object should not be code or a map.
    2484             :       CHECK(!object->IsMap());
    2485             :       CHECK(!object->IsAbstractCode());
    2486             : 
    2487             :       // The object itself should look OK.
    2488             :       object->ObjectVerify(isolate);
    2489             : 
    2490             :       // All the interior pointers should be contained in the heap.
    2491             :       VerifyPointersVisitor visitor(heap());
    2492             :       int size = object->Size();
    2493             :       object->IterateBody(map, size, &visitor);
    2494             : 
    2495             :       if (object->IsExternalString()) {
    2496             :         ExternalString external_string = ExternalString::cast(object);
    2497             :         size_t size = external_string->ExternalPayloadSize();
    2498             :         external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
    2499             :       } else if (object->IsJSArrayBuffer()) {
    2500             :         JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
    2501             :         if (ArrayBufferTracker::IsTracked(array_buffer)) {
    2502             :           size_t size = array_buffer->byte_length();
    2503             :           external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
    2504             :         }
    2505             :       }
    2506             : 
    2507             :       current += size;
    2508             :     } else {
    2509             :       // At end of page, switch to next page.
    2510             :       Page* page = Page::FromAllocationAreaAddress(current)->next_page();
    2511             :       current = page->area_start();
    2512             :     }
    2513             :   }
    2514             : 
    2515             :   for (int i = 0; i < kNumTypes; i++) {
    2516             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2517             :     CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
    2518             :   }
    2519             : 
    2520             :   // Check semi-spaces.
    2521             :   CHECK_EQ(from_space_.id(), kFromSpace);
    2522             :   CHECK_EQ(to_space_.id(), kToSpace);
    2523             :   from_space_.Verify();
    2524             :   to_space_.Verify();
    2525             : }
    2526             : #endif
    2527             : 
    2528             : // -----------------------------------------------------------------------------
    2529             : // SemiSpace implementation
    2530             : 
    2531           0 : void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
    2532             :   DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
    2533      125776 :   minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
    2534      125776 :   current_capacity_ = minimum_capacity_;
    2535      125776 :   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
    2536       62888 :   committed_ = false;
    2537           0 : }
    2538             : 
    2539             : 
    2540      125756 : void SemiSpace::TearDown() {
    2541             :   // Properly uncommit memory to keep the allocator counters in sync.
    2542      125756 :   if (is_committed()) {
    2543       78142 :     Uncommit();
    2544             :   }
    2545      125756 :   current_capacity_ = maximum_capacity_ = 0;
    2546           0 : }
    2547             : 
    2548             : 
    2549      103514 : bool SemiSpace::Commit() {
    2550             :   DCHECK(!is_committed());
    2551      103514 :   const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
    2552      311618 :   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    2553             :     Page* new_page =
    2554             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2555             :             MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2556      208104 :             NOT_EXECUTABLE);
    2557      208104 :     if (new_page == nullptr) {
    2558           0 :       if (pages_added) RewindPages(pages_added);
    2559             :       return false;
    2560             :     }
    2561             :     memory_chunk_list_.PushBack(new_page);
    2562             :   }
    2563             :   Reset();
    2564      103514 :   AccountCommitted(current_capacity_);
    2565      103514 :   if (age_mark_ == kNullAddress) {
    2566       80897 :     age_mark_ = first_page()->area_start();
    2567             :   }
    2568      103514 :   committed_ = true;
    2569      103514 :   return true;
    2570             : }
    2571             : 
    2572             : 
    2573      103499 : bool SemiSpace::Uncommit() {
    2574             :   DCHECK(is_committed());
    2575      431112 :   while (!memory_chunk_list_.Empty()) {
    2576             :     MemoryChunk* chunk = memory_chunk_list_.front();
    2577      224114 :     memory_chunk_list_.Remove(chunk);
    2578      327613 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
    2579             :   }
    2580      103499 :   current_page_ = nullptr;
    2581      103499 :   AccountUncommitted(current_capacity_);
    2582      103499 :   committed_ = false;
    2583      103499 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2584      103498 :   return true;
    2585             : }
    2586             : 
    2587             : 
    2588         342 : size_t SemiSpace::CommittedPhysicalMemory() {
    2589         342 :   if (!is_committed()) return 0;
    2590             :   size_t size = 0;
    2591        1030 :   for (Page* p : *this) {
    2592         688 :     size += p->CommittedPhysicalMemory();
    2593             :   }
    2594             :   return size;
    2595             : }
    2596             : 
    2597        3896 : bool SemiSpace::GrowTo(size_t new_capacity) {
    2598        3896 :   if (!is_committed()) {
    2599          91 :     if (!Commit()) return false;
    2600             :   }
    2601             :   DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
    2602             :   DCHECK_LE(new_capacity, maximum_capacity_);
    2603             :   DCHECK_GT(new_capacity, current_capacity_);
    2604        3896 :   const size_t delta = new_capacity - current_capacity_;
    2605             :   DCHECK(IsAligned(delta, AllocatePageSize()));
    2606        3896 :   const int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2607             :   DCHECK(last_page());
    2608             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2609       16836 :       heap()->incremental_marking()->non_atomic_marking_state();
    2610       20732 :   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    2611             :     Page* new_page =
    2612             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2613             :             MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2614       16836 :             NOT_EXECUTABLE);
    2615       16836 :     if (new_page == nullptr) {
    2616           0 :       if (pages_added) RewindPages(pages_added);
    2617             :       return false;
    2618             :     }
    2619             :     memory_chunk_list_.PushBack(new_page);
    2620       16836 :     marking_state->ClearLiveness(new_page);
    2621             :     // Duplicate the flags that were set on the old page.
    2622       16836 :     new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
    2623             :   }
    2624             :   AccountCommitted(delta);
    2625        3896 :   current_capacity_ = new_capacity;
    2626        3896 :   return true;
    2627             : }
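                     : // Editorial sketch: GrowTo() (like Commit() above) is all-or-nothing --
                     : // on any page allocation failure it rewinds the pages added so far.
                     : // Schematically, with AllocatePage/Append as stand-ins for the calls
                     : // above:
                     : //
                     : //   int added = 0;
                     : //   for (; added < delta_pages; added++) {
                     : //     Page* p = AllocatePage();        // may return nullptr
                     : //     if (p == nullptr) break;
                     : //     Append(p);
                     : //   }
                     : //   if (added < delta_pages) {         // partial failure
                     : //     if (added) RewindPages(added);   // undo what we added
                     : //     return false;
                     : //   }
                     : //   return true;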
    2628             : 
    2629         212 : void SemiSpace::RewindPages(int num_pages) {
    2630             :   DCHECK_GT(num_pages, 0);
    2631             :   DCHECK(last_page());
    2632        1220 :   while (num_pages > 0) {
    2633             :     MemoryChunk* last = last_page();
    2634         796 :     memory_chunk_list_.Remove(last);
    2635         796 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
    2636         796 :     num_pages--;
    2637             :   }
    2638         212 : }
    2639             : 
    2640         212 : bool SemiSpace::ShrinkTo(size_t new_capacity) {
    2641             :   DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
    2642             :   DCHECK_GE(new_capacity, minimum_capacity_);
    2643             :   DCHECK_LT(new_capacity, current_capacity_);
    2644         212 :   if (is_committed()) {
    2645         212 :     const size_t delta = current_capacity_ - new_capacity;
    2646             :     DCHECK(IsAligned(delta, Page::kPageSize));
    2647         212 :     int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2648         212 :     RewindPages(delta_pages);
    2649         212 :     AccountUncommitted(delta);
    2650         212 :     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2651             :   }
    2652         212 :   current_capacity_ = new_capacity;
    2653         212 :   return true;
    2654             : }
    2655             : 
    2656      214172 : void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
    2657      815992 :   for (Page* page : *this) {
    2658      601820 :     page->set_owner(this);
    2659      601820 :     page->SetFlags(flags, mask);
    2660      601820 :     if (id_ == kToSpace) {
    2661             :       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
    2662             :       page->SetFlag(MemoryChunk::IN_TO_SPACE);
    2663             :       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2664             :       heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
    2665             :           page, 0);
    2666             :     } else {
    2667             :       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
    2668             :       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    2669             :     }
    2670             :     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
    2671             :            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    2672             :   }
    2673      214172 : }
    2674             : 
    2675             : 
    2676           0 : void SemiSpace::Reset() {
    2677             :   DCHECK(first_page());
    2678             :   DCHECK(last_page());
    2679      273594 :   current_page_ = first_page();
    2680      273594 :   pages_used_ = 0;
    2681           0 : }
    2682             : 
    2683        2047 : void SemiSpace::RemovePage(Page* page) {
    2684        2047 :   if (current_page_ == page) {
    2685         287 :     if (page->prev_page()) {
    2686         282 :       current_page_ = page->prev_page();
    2687             :     }
    2688             :   }
    2689        2047 :   memory_chunk_list_.Remove(page);
    2690        6141 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    2691        4094 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2692        4094 :     DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    2693             :   }
    2694        2047 : }
    2695             : 
    2696        1465 : void SemiSpace::PrependPage(Page* page) {
    2697             :   page->SetFlags(current_page()->GetFlags(),
    2698        1465 :                  static_cast<uintptr_t>(Page::kCopyAllFlags));
    2699        1465 :   page->set_owner(this);
    2700             :   memory_chunk_list_.PushFront(page);
    2701        1465 :   pages_used_++;
    2702        4395 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    2703        2930 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2704        2930 :     IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    2705             :   }
    2706        1465 : }
    2707             : 
    2708      107086 : void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
    2709             :   // We won't be swapping semispaces without data in them.
    2710             :   DCHECK(from->first_page());
    2711             :   DCHECK(to->first_page());
    2712             : 
    2713      107086 :   intptr_t saved_to_space_flags = to->current_page()->GetFlags();
    2714             : 
    2715             :   // We swap all properties but id_.
    2716             :   std::swap(from->current_capacity_, to->current_capacity_);
    2717             :   std::swap(from->maximum_capacity_, to->maximum_capacity_);
    2718             :   std::swap(from->minimum_capacity_, to->minimum_capacity_);
    2719             :   std::swap(from->age_mark_, to->age_mark_);
    2720             :   std::swap(from->committed_, to->committed_);
    2721             :   std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
    2722             :   std::swap(from->current_page_, to->current_page_);
    2723             :   std::swap(from->external_backing_store_bytes_,
    2724             :             to->external_backing_store_bytes_);
    2725             : 
    2726      107086 :   to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
    2727      107086 :   from->FixPagesFlags(0, 0);
    2728      107086 : }
    2729             : 
    2730      107086 : void SemiSpace::set_age_mark(Address mark) {
    2731             :   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
    2732      107086 :   age_mark_ = mark;
    2733             :   // Mark all pages up to the one containing mark.
    2734      228239 :   for (Page* p : PageRange(space_start(), mark)) {
    2735             :     p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2736             :   }
    2737      107086 : }
    2738             : 
    2739           0 : std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
    2740             :   // Iterate the to-space via the iterator from NewSpace::GetObjectIterator.
    2741           0 :   UNREACHABLE();
    2742             : }
    2743             : 
    2744             : #ifdef DEBUG
    2745             : void SemiSpace::Print() {}
    2746             : #endif
    2747             : 
    2748             : #ifdef VERIFY_HEAP
    2749             : void SemiSpace::Verify() {
    2750             :   bool is_from_space = (id_ == kFromSpace);
    2751             :   size_t external_backing_store_bytes[kNumTypes];
    2752             : 
    2753             :   for (int i = 0; i < kNumTypes; i++) {
    2754             :     external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    2755             :   }
    2756             : 
    2757             :   for (Page* page : *this) {
    2758             :     CHECK_EQ(page->owner(), this);
    2759             :     CHECK(page->InNewSpace());
    2760             :     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
    2761             :                                         : MemoryChunk::IN_TO_SPACE));
    2762             :     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
    2763             :                                          : MemoryChunk::IN_FROM_SPACE));
    2764             :     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    2765             :     if (!is_from_space) {
    2766             :       // The pointers-from-here-are-interesting flag isn't updated dynamically
    2767             :       // on from-space pages, so it might be out of sync with the marking state.
    2768             :       if (page->heap()->incremental_marking()->IsMarking()) {
    2769             :         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2770             :       } else {
    2771             :         CHECK(
    2772             :             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2773             :       }
    2774             :     }
    2775             :     for (int i = 0; i < kNumTypes; i++) {
    2776             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2777             :       external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
    2778             :     }
    2779             : 
    2780             :     CHECK_IMPLIES(page->list_node().prev(),
    2781             :                   page->list_node().prev()->list_node().next() == page);
    2782             :   }
    2783             :   for (int i = 0; i < kNumTypes; i++) {
    2784             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2785             :     CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
    2786             :   }
    2787             : }
    2788             : #endif
    2789             : 
    2790             : #ifdef DEBUG
    2791             : void SemiSpace::AssertValidRange(Address start, Address end) {
    2792             :   // Both addresses must belong to the same semi-space.
    2793             :   Page* page = Page::FromAllocationAreaAddress(start);
    2794             :   Page* end_page = Page::FromAllocationAreaAddress(end);
    2795             :   SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
    2796             :   DCHECK_EQ(space, end_page->owner());
    2797             :   // The start address must not come after the end address: either both are
    2798             :   // on the same page, or the end address is on a later page in the linked
    2799             :   // list of semi-space pages.
    2800             :   if (page == end_page) {
    2801             :     DCHECK_LE(start, end);
    2802             :   } else {
    2803             :     while (page != end_page) {
    2804             :       page = page->next_page();
    2805             :     }
    2806             :     DCHECK(page);
    2807             :   }
    2808             : }
    2809             : #endif
    2810             : 
    2811             : 
    2812             : // -----------------------------------------------------------------------------
    2813             : // SemiSpaceIterator implementation.
    2814             : 
    2815        7853 : SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
    2816             :   Initialize(space->first_allocatable_address(), space->top());
    2817           0 : }
    2818             : 
    2819             : 
    2820           0 : void SemiSpaceIterator::Initialize(Address start, Address end) {
    2821             :   SemiSpace::AssertValidRange(start, end);
    2822        7853 :   current_ = start;
    2823        7853 :   limit_ = end;
    2824           0 : }
    2825             : 
    2826         251 : size_t NewSpace::CommittedPhysicalMemory() {
    2827         251 :   if (!base::OS::HasLazyCommits()) return CommittedMemory();
    2828         251 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2829         251 :   size_t size = to_space_.CommittedPhysicalMemory();
    2830         251 :   if (from_space_.is_committed()) {
    2831          91 :     size += from_space_.CommittedPhysicalMemory();
    2832             :   }
    2833         251 :   return size;
    2834             : }
    2835             : 
    2836             : 
    2837             : // -----------------------------------------------------------------------------
    2838             : // Free lists for old object spaces implementation
    2839             : 
    2840             : 
    2841           0 : void FreeListCategory::Reset() {
    2842             :   set_top(FreeSpace());
    2843             :   set_prev(nullptr);
    2844             :   set_next(nullptr);
    2845     2423654 :   available_ = 0;
    2846           0 : }
    2847             : 
    2848      761367 : FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
    2849             :                                              size_t* node_size) {
    2850             :   DCHECK(page()->CanAllocate());
    2851             :   FreeSpace node = top();
    2852     1444606 :   if (node.is_null() || static_cast<size_t>(node->Size()) < minimum_size) {
    2853      103421 :     *node_size = 0;
    2854      103421 :     return FreeSpace();
    2855             :   }
    2856             :   set_top(node->next());
    2857      657944 :   *node_size = node->Size();
    2858      657944 :   available_ -= *node_size;
    2859      657944 :   return node;
    2860             : }
    2861             : 
    2862      697775 : FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
    2863             :                                                 size_t* node_size) {
    2864             :   DCHECK(page()->CanAllocate());
    2865             :   FreeSpace prev_non_evac_node;
    2866     1396152 :   for (FreeSpace cur_node = top(); !cur_node.is_null();
    2867             :        cur_node = cur_node->next()) {
    2868      674683 :     size_t size = cur_node->size();
    2869      674683 :     if (size >= minimum_size) {
    2870             :       DCHECK_GE(available_, size);
    2871      674081 :       available_ -= size;
    2872      674081 :       if (cur_node == top()) {
    2873             :         set_top(cur_node->next());
    2874             :       }
    2875      674081 :       if (!prev_non_evac_node.is_null()) {
    2876           0 :         MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
    2877           2 :         if (chunk->owner()->identity() == CODE_SPACE) {
    2878           0 :           chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
    2879             :         }
    2880             :         prev_non_evac_node->set_next(cur_node->next());
    2881             :       }
    2882      674081 :       *node_size = size;
    2883      674081 :       return cur_node;
    2884             :     }
    2885             : 
    2886             :     prev_non_evac_node = cur_node;
    2887             :   }
    2888       23694 :   return FreeSpace();
    2889             : }
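                     : // Editorial sketch: this is a first-fit scan over a singly linked free
                     : // list, unlinking the hit from the head or from its predecessor, while
                     : // PickNodeFromList above is the O(1) fast path that only inspects the
                     : // head. Over a plain node type (hypothetical, not V8's FreeSpace):
                     : //
                     : //   #include <cstddef>
                     : //   struct Node { std::size_t size; Node* next; };
                     : //   Node* FirstFit(Node*& head, std::size_t min_size,
                     : //                  std::size_t* node_size) {
                     : //     for (Node *prev = nullptr, *cur = head; cur != nullptr;
                     : //          prev = cur, cur = cur->next) {
                     : //       if (cur->size >= min_size) {
                     : //         (prev ? prev->next : head) = cur->next;  // unlink the hit
                     : //         *node_size = cur->size;
                     : //         return cur;
                     : //       }
                     : //     }
                     : //     return nullptr;
                     : //   }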
    2890             : 
    2891    22741786 : void FreeListCategory::Free(Address start, size_t size_in_bytes,
    2892     3971859 :                             FreeMode mode) {
    2893             :   DCHECK(page()->CanAllocate());
    2894             :   FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
    2895             :   free_space->set_next(top());
    2896             :   set_top(free_space);
    2897    22741786 :   available_ += size_in_bytes;
    2898    25531844 :   if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
    2899             :     owner()->AddCategory(this);
    2900             :   }
    2901    22741786 : }
    2902             : 
    2903             : 
    2904       62827 : void FreeListCategory::RepairFreeList(Heap* heap) {
    2905             :   FreeSpace n = top();
    2906      125654 :   while (!n.is_null()) {
    2907             :     MapWordSlot map_location = n.map_slot();
    2908             :     // We can't use .is_null() here because *map_location returns an
    2909             :     // Object (for which "is null" is not defined, as it would be
    2910             :     // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
    2911           0 :     if (*map_location == Map()) {
    2912             :       map_location.store(ReadOnlyRoots(heap).free_space_map());
    2913             :     } else {
    2914             :       DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
    2915             :     }
    2916             :     n = n->next();
    2917             :   }
    2918       62827 : }
    2919             : 
    2920     6492509 : void FreeListCategory::Relink() {
    2921             :   DCHECK(!is_linked());
    2922             :   owner()->AddCategory(this);
    2923           0 : }
    2924             : 
    2925      497901 : FreeList::FreeList() : wasted_bytes_(0) {
    2926     2987406 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2927     2987406 :     categories_[i] = nullptr;
    2928             :   }
    2929      497901 :   Reset();
    2930           0 : }
    2931             : 
    2932             : 
    2933      748377 : void FreeList::Reset() {
    2934             :   ForAllFreeListCategories(
    2935             :       [](FreeListCategory* category) { category->Reset(); });
    2936     4490262 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2937     4490262 :     categories_[i] = nullptr;
    2938             :   }
    2939      748377 :   ResetStats();
    2940      748376 : }
    2941             : 
    2942    23257368 : size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
    2943             :   Page* page = Page::FromAddress(start);
    2944             :   page->DecreaseAllocatedBytes(size_in_bytes);
    2945             : 
    2946             :   // Blocks have to be a minimum size to hold free list items.
    2947    23257368 :   if (size_in_bytes < kMinBlockSize) {
    2948             :     page->add_wasted_memory(size_in_bytes);
    2949             :     wasted_bytes_ += size_in_bytes;
    2950      516731 :     return size_in_bytes;
    2951             :   }
    2952             : 
    2953             :   // Insert other blocks at the head of a free list of the appropriate
    2954             :   // magnitude.
    2955             :   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
    2956    22740637 :   page->free_list_category(type)->Free(start, size_in_bytes, mode);
    2957             :   DCHECK_EQ(page->AvailableInFreeList(),
    2958             :             page->AvailableInFreeListFromAllocatedBytes());
    2959    22741345 :   return 0;
    2960             : }
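                     : 
                     : // The bucket boundaries below are illustrative, not V8's actual limits;
                     : // the point is that SelectFreeListCategoryType maps a byte size onto a
                     : // small fixed set of size classes, so Free() touches exactly one list.
                     : enum CategorySketch { kTinyish, kSmallish, kMediumish, kLargish, kHugeish };
                     : 
                     : inline CategorySketch SelectCategorySketch(size_t size_in_bytes) {
                     :   if (size_in_bytes <= 32) return kTinyish;
                     :   if (size_in_bytes <= 256) return kSmallish;
                     :   if (size_in_bytes <= 2048) return kMediumish;
                     :   if (size_in_bytes <= 16384) return kLargish;
                     :   return kHugeish;  // everything larger goes to the linear-search list
                     : }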
    2961             : 
    2962     3226259 : FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
    2963             :                                size_t* node_size) {
    2964             :   FreeListCategoryIterator it(this, type);
    2965             :   FreeSpace node;
    2966     6524564 :   while (it.HasNext()) {
    2967             :     FreeListCategory* current = it.Next();
    2968      677590 :     node = current->PickNodeFromList(minimum_size, node_size);
    2969      677589 :     if (!node.is_null()) {
    2970             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2971      605543 :       return node;
    2972             :     }
    2973             :     RemoveCategory(current);
    2974             :   }
    2975     2620715 :   return node;
    2976             : }
    2977             : 
    2978           0 : FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
    2979             :                                   size_t minimum_size, size_t* node_size) {
    2980      450160 :   if (categories_[type] == nullptr) return FreeSpace();
    2981       83776 :   FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
    2982             :   if (!node.is_null()) {
    2983             :     DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2984             :   }
    2985       83776 :   return node;
    2986             : }
    2987             : 
    2988     1326703 : FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
    2989             :                                         size_t* node_size,
    2990             :                                         size_t minimum_size) {
    2991             :   FreeListCategoryIterator it(this, type);
    2992             :   FreeSpace node;
    2993     2677100 :   while (it.HasNext()) {
    2994             :     FreeListCategory* current = it.Next();
    2995      697775 :     node = current->SearchForNodeInList(minimum_size, node_size);
    2996      697775 :     if (!node.is_null()) {
    2997             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2998      674081 :       return node;
    2999             :     }
    3000       23694 :     if (current->is_empty()) {
    3001             :       RemoveCategory(current);
    3002             :     }
    3003             :   }
    3004      652622 :   return node;
    3005             : }
    3006             : 
    3007     1932204 : FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
    3008             :   DCHECK_GE(kMaxBlockSize, size_in_bytes);
    3009             :   FreeSpace node;
    3010             :   // First try the allocation fast path: pick a category whose smallest
    3011             :   // element is guaranteed to fit the request; this is constant time.
    3012             :   FreeListCategoryType type =
    3013             :       SelectFastAllocationFreeListCategoryType(size_in_bytes);
    3014     5158459 :   for (int i = type; i < kHuge && node.is_null(); i++) {
    3015             :     node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
    3016     3226220 :                       node_size);
    3017             :   }
    3018             : 
    3019     1932239 :   if (node.is_null()) {
    3020             :     // Next search the huge list for free list nodes. This takes linear time in
    3021             :     // the number of huge elements.
    3022     1326702 :     node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
    3023             :   }
    3024             : 
    3025     1932223 :   if (node.is_null() && type != kHuge) {
    3026             :     // We didn't find anything in the huge list. Now search the best fitting
    3027             :     // free list for a node that has at least the requested size.
    3028             :     type = SelectFreeListCategoryType(size_in_bytes);
    3029             :     node = TryFindNodeIn(type, size_in_bytes, node_size);
    3030             :   }
    3031             : 
    3032     1932223 :   if (!node.is_null()) {
    3033     1332024 :     Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
    3034             :   }
    3035             : 
    3036             :   DCHECK(IsVeryLong() || Available() == SumFreeLists());
    3037     1932223 :   return node;
    3038             : }
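                     : 
                     : // A condensed restatement of the three-stage strategy above. The helper
                     : // declarations are hypothetical stand-ins for FindNodeIn,
                     : // SearchForNodeInList and TryFindNodeIn, reusing the sketch types from
                     : // earlier.
                     : Node* FindIn(CategorySketch type, size_t size, size_t* node_size);
                     : Node* SearchHugeList(size_t size, size_t* node_size);
                     : Node* TryBestFit(size_t size, size_t* node_size);
                     : 
                     : inline Node* AllocateSketch(CategorySketch fast_fit, size_t size,
                     :                             size_t* node_size) {
                     :   // 1. Fast path: O(1) pops from buckets guaranteed to fit the request.
                     :   for (int i = fast_fit; i < kHugeish; i++) {
                     :     if (Node* n = FindIn(static_cast<CategorySketch>(i), size, node_size))
                     :       return n;
                     :   }
                     :   // 2. Huge list: linear in the number of huge free blocks.
                     :   if (Node* n = SearchHugeList(size, node_size)) return n;
                     :   // 3. Last chance: one O(1) attempt on the best-fitting bucket.
                     :   return TryBestFit(size, node_size);
                     : }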
    3039             : 
    3040      217123 : size_t FreeList::EvictFreeListItems(Page* page) {
    3041      217123 :   size_t sum = 0;
    3042     1302738 :   page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
    3043             :     DCHECK_EQ(this, category->owner());
    3044     1302738 :     sum += category->available();
    3045     1302738 :     RemoveCategory(category);
    3046             :     category->Reset();
    3047     1302738 :   });
    3048      217123 :   return sum;
    3049             : }
    3050             : 
    3051           0 : bool FreeList::ContainsPageFreeListItems(Page* page) {
    3052           0 :   bool contained = false;
    3053             :   page->ForAllFreeListCategories(
    3054           0 :       [this, &contained](FreeListCategory* category) {
    3055           0 :         if (category->owner() == this && category->is_linked()) {
    3056           0 :           contained = true;
    3057             :         }
    3058           0 :       });
    3059           0 :   return contained;
    3060             : }
    3061             : 
    3062           0 : void FreeList::RepairLists(Heap* heap) {
    3063             :   ForAllFreeListCategories(
    3064      125654 :       [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
    3065           0 : }
    3066             : 
    3067           0 : bool FreeList::AddCategory(FreeListCategory* category) {
    3068     7674310 :   FreeListCategoryType type = category->type_;
    3069             :   DCHECK_LT(type, kNumberOfCategories);
    3070     7674310 :   FreeListCategory* top = categories_[type];
    3071             : 
    3072     7674310 :   if (category->is_empty()) return false;
    3073     2645009 :   if (top == category) return false;
    3074             : 
    3075             :   // Common double-linked list insertion.
    3076     1998224 :   if (top != nullptr) {
    3077             :     top->set_prev(category);
    3078             :   }
    3079             :   category->set_next(top);
    3080     1998224 :   categories_[type] = category;
    3081           0 :   return true;
    3082             : }
    3083             : 
    3084     5273166 : void FreeList::RemoveCategory(FreeListCategory* category) {
    3085     2362841 :   FreeListCategoryType type = category->type_;
    3086             :   DCHECK_LT(type, kNumberOfCategories);
    3087     2362841 :   FreeListCategory* top = categories_[type];
    3088             : 
    3089             :   // Common double-linked list removal.
    3090     2362841 :   if (top == category) {
    3091      473209 :     categories_[type] = category->next();
    3092             :   }
    3093     2362841 :   if (category->prev() != nullptr) {
    3094             :     category->prev()->set_next(category->next());
    3095             :   }
    3096     2362841 :   if (category->next() != nullptr) {
    3097             :     category->next()->set_prev(category->prev());
    3098             :   }
    3099             :   category->set_next(nullptr);
    3100             :   category->set_prev(nullptr);
    3101           6 : }
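                     : 
                     : // AddCategory and RemoveCategory are plain doubly-linked list operations
                     : // on the per-type chain headed by categories_[type]. A generic sketch of
                     : // the removal, with a hypothetical Cat type:
                     : struct Cat {
                     :   Cat* prev = nullptr;
                     :   Cat* next = nullptr;
                     : };
                     : 
                     : inline void RemoveSketch(Cat** head, Cat* c) {
                     :   if (*head == c) *head = c->next;  // unhook from the list head
                     :   if (c->prev != nullptr) c->prev->next = c->next;
                     :   if (c->next != nullptr) c->next->prev = c->prev;
                     :   c->next = nullptr;  // leave c fully unlinked, matching the code above
                     :   c->prev = nullptr;
                     : }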
    3102             : 
    3103           0 : void FreeList::PrintCategories(FreeListCategoryType type) {
    3104             :   FreeListCategoryIterator it(this, type);
    3105             :   PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
    3106           0 :          static_cast<void*>(categories_[type]), type);
    3107           0 :   while (it.HasNext()) {
    3108             :     FreeListCategory* current = it.Next();
    3109           0 :     PrintF("%p -> ", static_cast<void*>(current));
    3110             :   }
    3111           0 :   PrintF("null\n");
    3112           0 : }
    3113             : 
    3114             : 
    3115             : #ifdef DEBUG
    3116             : size_t FreeListCategory::SumFreeList() {
    3117             :   size_t sum = 0;
    3118             :   FreeSpace cur = top();
    3119             :   while (!cur.is_null()) {
    3120             :     // We can't use "cur->map()" here because both cur's map and the
    3121             :     // root can be null during bootstrapping.
    3122             :     DCHECK_EQ(*cur->map_slot(),
    3123             :               page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap));
    3124             :     sum += cur->relaxed_read_size();
    3125             :     cur = cur->next();
    3126             :   }
    3127             :   return sum;
    3128             : }
    3129             : 
    3130             : int FreeListCategory::FreeListLength() {
    3131             :   int length = 0;
    3132             :   FreeSpace cur = top();
    3133             :   while (!cur.is_null()) {
    3134             :     length++;
    3135             :     cur = cur->next();
    3136             :     if (length == kVeryLongFreeList) return length;
    3137             :   }
    3138             :   return length;
    3139             : }
    3140             : 
    3141             : bool FreeList::IsVeryLong() {
    3142             :   int len = 0;
    3143             :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    3144             :     FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
    3145             :     while (it.HasNext()) {
    3146             :       len += it.Next()->FreeListLength();
    3147             :       if (len >= FreeListCategory::kVeryLongFreeList) return true;
    3148             :     }
    3149             :   }
    3150             :   return false;
    3151             : }
    3152             : 
    3153             : 
    3154             : // This can take a very long time because it is linear in the number of entries
    3155             : // on the free list, so it should not be called if FreeListLength returns
    3156             : // kVeryLongFreeList.
    3157             : size_t FreeList::SumFreeLists() {
    3158             :   size_t sum = 0;
    3159             :   ForAllFreeListCategories(
    3160             :       [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
    3161             :   return sum;
    3162             : }
    3163             : #endif
    3164             : 
    3165             : 
    3166             : // -----------------------------------------------------------------------------
    3167             : // OldSpace implementation
    3168             : 
    3169      250476 : void PagedSpace::PrepareForMarkCompact() {
    3170             :   // We don't have a linear allocation area while sweeping.  It will be restored
    3171             :   // on the first allocation after the sweep.
    3172      250476 :   FreeLinearAllocationArea();
    3173             : 
    3174             :   // Clear the free list before a full GC---it will be rebuilt afterward.
    3175      250476 :   free_list_.Reset();
    3176      250476 : }
    3177             : 
    3178    11293225 : size_t PagedSpace::SizeOfObjects() {
    3179    11293225 :   CHECK_GE(limit(), top());
    3180             :   DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
    3181    22586448 :   return Size() - (limit() - top());
    3182             : }
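                     : 
                     : // A worked instance of the accounting above, with made-up numbers: if
                     : // Size() is 512 KB and the linear allocation area runs from top == 0x1000
                     : // to limit == 0x2000, those 4 KB are reserved but hold no objects yet.
                     : static_assert(512 * 1024 - (0x2000 - 0x1000) == 508 * 1024,
                     :               "SizeOfObjects() == Size() - (limit() - top())");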
    3183             : 
    3184         166 : bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
    3185         166 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3186         166 :   if (collector->sweeping_in_progress()) {
    3187             :     // Wait for the sweeper threads here and complete the sweeping phase.
    3188          10 :     collector->EnsureSweepingCompleted();
    3189             : 
    3190             :     // After waiting for the sweeper threads, there may be new free-list
    3191             :     // entries.
    3192          10 :     return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
    3193             :   }
    3194             :   return false;
    3195             : }
    3196             : 
    3197          50 : bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
    3198          60 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3199          55 :   if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
    3200           5 :     collector->sweeper()->ParallelSweepSpace(identity(), 0);
    3201           5 :     RefillFreeList();
    3202           5 :     return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
    3203             :   }
    3204             :   return false;
    3205             : }
    3206             : 
    3207     1151790 : bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
    3208     2303580 :   VMState<GC> state(heap()->isolate());
    3209             :   RuntimeCallTimerScope runtime_timer(
    3210     1151790 :       heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
    3211     2303581 :   return RawSlowRefillLinearAllocationArea(size_in_bytes);
    3212             : }
    3213             : 
    3214      180329 : bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
    3215      180329 :   return RawSlowRefillLinearAllocationArea(size_in_bytes);
    3216             : }
    3217             : 
    3218     1332120 : bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
    3219             :   // Allocation in this space has failed.
    3220             :   DCHECK_GE(size_in_bytes, 0);
    3221             :   const int kMaxPagesToSweep = 1;
    3222             : 
    3223     1332120 :   if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
    3224             : 
    3225     1126495 :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3226             :   // Sweeping is still in progress.
    3227      530944 :   if (collector->sweeping_in_progress()) {
    3228      137033 :     if (FLAG_concurrent_sweeping && !is_local() &&
    3229       44065 :         !collector->sweeper()->AreSweeperTasksRunning()) {
    3230       28128 :       collector->EnsureSweepingCompleted();
    3231             :     }
    3232             : 
    3233             :     // First try to refill the free list; concurrent sweeper threads
    3234             :     // may have freed some objects in the meantime.
    3235       92969 :     RefillFreeList();
    3236             : 
    3237             :     // Retry the free list allocation.
    3238       93007 :     if (RefillLinearAllocationAreaFromFreeList(
    3239             :             static_cast<size_t>(size_in_bytes)))
    3240             :       return true;
    3241             : 
    3242             :     // If sweeping is still in progress, try to sweep pages.
    3243             :     int max_freed = collector->sweeper()->ParallelSweepSpace(
    3244       57973 :         identity(), size_in_bytes, kMaxPagesToSweep);
    3245       57990 :     RefillFreeList();
    3246       57990 :     if (max_freed >= size_in_bytes) {
    3247       45165 :       if (RefillLinearAllocationAreaFromFreeList(
    3248             :               static_cast<size_t>(size_in_bytes)))
    3249             :         return true;
    3250             :     }
    3251      437976 :   } else if (is_local()) {
    3252             :     // Sweeping is not in progress, and we are on a {CompactionSpace}. This
    3253             :     // can only happen when we are evacuating for the young generation.
    3254       24974 :     PagedSpace* main_space = heap()->paged_space(identity());
    3255       24974 :     Page* page = main_space->RemovePageSafe(size_in_bytes);
    3256       24996 :     if (page != nullptr) {
    3257       18590 :       AddPage(page);
    3258       18591 :       if (RefillLinearAllocationAreaFromFreeList(
    3259             :               static_cast<size_t>(size_in_bytes)))
    3260             :         return true;
    3261             :     }
    3262             :   }
    3263             : 
    3264      443565 :   if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
    3265             :     DCHECK((CountTotalPages() > 1) ||
    3266             :            (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    3267             :     return RefillLinearAllocationAreaFromFreeList(
    3268      443349 :         static_cast<size_t>(size_in_bytes));
    3269             :   }
    3270             : 
    3271             :   // If sweeper threads are active, wait for them at that point and steal
    3272             :   // elements from their free lists. Allocation may still fail then, which
    3273             :   // would indicate that there is not enough memory for the given allocation.
    3274         216 :   return SweepAndRetryAllocation(size_in_bytes);
    3275             : }
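                     : 
                     : // The slow path above degrades through well-defined stages. A condensed
                     : // sketch with hypothetical helpers standing in for the calls made above;
                     : // the compaction-space page-stealing branch is omitted.
                     : bool TryFreeList();             // RefillLinearAllocationAreaFromFreeList
                     : bool SweepingInProgress();
                     : void HarvestSweeperResults();   // RefillFreeList
                     : void SweepOnePage();            // bounded ParallelSweepSpace
                     : bool ExpandSpace();             // Expand, guarded by growth policy
                     : bool SweepToCompletionAndRetry();
                     : 
                     : inline bool SlowRefillSketch() {
                     :   if (TryFreeList()) return true;      // 1. free list as-is
                     :   if (SweepingInProgress()) {
                     :     HarvestSweeperResults();           // 2. concurrent sweeper output
                     :     if (TryFreeList()) return true;
                     :     SweepOnePage();                    // 3. sweep synchronously
                     :     if (TryFreeList()) return true;
                     :   }
                     :   if (ExpandSpace()) return TryFreeList();  // 4. grow by one page
                     :   return SweepToCompletionAndRetry();       // 5. last resort
                     : }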
    3276             : 
    3277             : // -----------------------------------------------------------------------------
    3278             : // MapSpace implementation
    3279             : 
    3280             : #ifdef VERIFY_HEAP
    3281             : void MapSpace::VerifyObject(HeapObject object) { CHECK(object->IsMap()); }
    3282             : #endif
    3283             : 
    3284       62883 : ReadOnlySpace::ReadOnlySpace(Heap* heap)
    3285             :     : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
    3286      125766 :       is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
    3287       62883 : }
    3288             : 
    3289       63324 : void ReadOnlyPage::MakeHeaderRelocatable() {
    3290       63324 :   if (mutex_ != nullptr) {
    3291             :     // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
    3292       62883 :     delete mutex_;
    3293       62883 :     mutex_ = nullptr;
    3294       62883 :     local_tracker_ = nullptr;
    3295       62883 :     reservation_.Reset();
    3296             :   }
    3297       63324 : }
    3298             : 
    3299       63765 : void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
    3300             :   const size_t page_size = MemoryAllocator::GetCommitPageSize();
    3301             :   const size_t area_start_offset =
    3302       63765 :       RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage(), page_size);
    3303      127530 :   MemoryAllocator* memory_allocator = heap()->memory_allocator();
    3304      127530 :   for (Page* p : *this) {
    3305             :     ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
    3306       63765 :     if (access == PageAllocator::kRead) {
    3307       63324 :       page->MakeHeaderRelocatable();
    3308             :     }
    3309             : 
    3310             :     // Read-only pages don't have a valid reservation object, so we obtain
    3311             :     // the proper page allocator manually.
    3312             :     v8::PageAllocator* page_allocator =
    3313       63765 :         memory_allocator->page_allocator(page->executable());
    3314      191295 :     CHECK(SetPermissions(page_allocator, page->address() + area_start_offset,
    3315             :                          page->size() - area_start_offset, access));
    3316             :   }
    3317       63765 : }
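                     : 
                     : // Marking pages read-only ultimately reduces to flipping OS-level page
                     : // protections on each page's object area. A POSIX-flavored sketch (the
                     : // address must be commit-page aligned), not V8's MemoryAllocator API:
                     : #include <sys/mman.h>
                     : 
                     : inline bool SetAreaReadOnly(void* page_aligned_area, size_t area_size) {
                     :   return mprotect(page_aligned_area, area_size, PROT_READ) == 0;
                     : }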
    3318             : 
    3319             : // After we have booted, we have created a map which represents free space
    3320             : // on the heap.  If there was already a free list then the elements on it
    3321             : // were created with the wrong FreeSpaceMap (normally nullptr), so we need to
    3322             : // fix them.
    3323       62827 : void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
    3324       62827 :   free_list_.RepairLists(heap());
    3325             :   // Each page may have a small free space that is not tracked by a free list.
    3326             :   // Those free spaces still contain null as their map pointer.
    3327             :   // Overwrite them with new fillers.
    3328      188481 :   for (Page* page : *this) {
    3329       62827 :     int size = static_cast<int>(page->wasted_memory());
    3330       62827 :     if (size == 0) {
    3331             :       // If there is no wasted memory then all free space is in the free list.
    3332             :       continue;
    3333             :     }
    3334           0 :     Address start = page->HighWaterMark();
    3335             :     Address end = page->area_end();
    3336           0 :     if (start < end - size) {
    3337             :       // A region at the high watermark is already in the free list.
    3338           0 :       HeapObject filler = HeapObject::FromAddress(start);
    3339           0 :       CHECK(filler->IsFiller());
    3340           0 :       start += filler->Size();
    3341             :     }
    3342           0 :     CHECK_EQ(size, static_cast<int>(end - start));
    3343           0 :     heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
    3344             :   }
    3345       62827 : }
    3346             : 
    3347         620 : void ReadOnlySpace::ClearStringPaddingIfNeeded() {
    3348        1240 :   if (is_string_padding_cleared_) return;
    3349             : 
    3350             :   WritableScope writable_scope(this);
    3351         882 :   for (Page* page : *this) {
    3352         441 :     HeapObjectIterator iterator(page);
    3353     1111180 :     for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
    3354      555149 :       if (o->IsSeqOneByteString()) {
    3355      223146 :         SeqOneByteString::cast(o)->clear_padding();
    3356      332003 :       } else if (o->IsSeqTwoByteString()) {
    3357           0 :         SeqTwoByteString::cast(o)->clear_padding();
    3358             :       }
    3359             :     }
    3360             :   }
    3361         441 :   is_string_padding_cleared_ = true;
    3362             : }
    3363             : 
    3364       63324 : void ReadOnlySpace::MarkAsReadOnly() {
    3365             :   DCHECK(!is_marked_read_only_);
    3366       63324 :   FreeLinearAllocationArea();
    3367       63324 :   is_marked_read_only_ = true;
    3368       63324 :   SetPermissionsForPages(PageAllocator::kRead);
    3369       63324 : }
    3370             : 
    3371           0 : void ReadOnlySpace::MarkAsReadWrite() {
    3372             :   DCHECK(is_marked_read_only_);
    3373         441 :   SetPermissionsForPages(PageAllocator::kReadWrite);
    3374         441 :   is_marked_read_only_ = false;
    3375           0 : }
    3376             : 
    3377       56196 : Address LargePage::GetAddressToShrink(Address object_address,
    3378             :                                       size_t object_size) {
    3379       56196 :   if (executable() == EXECUTABLE) {
    3380             :     return 0;
    3381             :   }
    3382       20048 :   size_t used_size = ::RoundUp((object_address - address()) + object_size,
    3383       10024 :                                MemoryAllocator::GetCommitPageSize());
    3384       10024 :   if (used_size < CommittedPhysicalMemory()) {
    3385          51 :     return address() + used_size;
    3386             :   }
    3387             :   return 0;
    3388             : }
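                     : 
                     : // A worked instance of the RoundUp above, assuming a hypothetical 4 KB
                     : // commit page size: a 13000-byte object at the area start needs
                     : // RoundUp(13000, 4096) == 16384 committed bytes, and anything the page
                     : // has committed beyond that offset is eligible for shrinking.
                     : static_assert((13000 + 4096 - 1) / 4096 * 4096 == 16384,
                     :               "RoundUp(13000, 4 KB) == 16 KB");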
    3389             : 
    3390          51 : void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
    3391             :   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
    3392         153 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    3393             :   RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
    3394          51 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    3395          51 :   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
    3396          51 :   RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
    3397          51 : }
    3398             : 
    3399             : // -----------------------------------------------------------------------------
    3400             : // LargeObjectIterator
    3401             : 
    3402       23559 : LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
    3403       23559 :   current_ = space->first_page();
    3404           0 : }
    3405             : 
    3406       26824 : HeapObject LargeObjectIterator::Next() {
    3407      250004 :   if (current_ == nullptr) return HeapObject();
    3408             : 
    3409             :   HeapObject object = current_->GetObject();
    3410        3265 :   current_ = current_->next_page();
    3411        3265 :   return object;
    3412             : }
    3413             : 
    3414             : // -----------------------------------------------------------------------------
    3415             : // LargeObjectSpace
    3416             : 
    3417       62883 : LargeObjectSpace::LargeObjectSpace(Heap* heap)
    3418       62883 :     : LargeObjectSpace(heap, LO_SPACE) {}
    3419             : 
    3420      188649 : LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    3421             :     : Space(heap, id),
    3422             :       size_(0),
    3423             :       page_count_(0),
    3424             :       objects_size_(0),
    3425      377298 :       chunk_map_(1024) {}
    3426             : 
    3427      188604 : void LargeObjectSpace::TearDown() {
    3428      429920 :   while (!memory_chunk_list_.Empty()) {
    3429             :     LargePage* page = first_page();
    3430      158136 :     LOG(heap()->isolate(),
    3431             :         DeleteEvent("LargeObjectChunk",
    3432             :                     reinterpret_cast<void*>(page->address())));
    3433       52712 :     memory_chunk_list_.Remove(page);
    3434       52712 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
    3435             :   }
    3436      188604 : }
    3437             : 
    3438       17617 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
    3439       17617 :   return AllocateRaw(object_size, NOT_EXECUTABLE);
    3440             : }
    3441             : 
    3442       57800 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
    3443             :                                                Executability executable) {
    3444             :   // Check if we want to force a GC before growing the old space further.
    3445             :   // If so, fail the allocation.
    3446      345941 :   if (!heap()->CanExpandOldGeneration(object_size) ||
    3447       57717 :       !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    3448             :     return AllocationResult::Retry(identity());
    3449             :   }
    3450             : 
    3451       57606 :   LargePage* page = AllocateLargePage(object_size, executable);
    3452       57606 :   if (page == nullptr) return AllocationResult::Retry(identity());
    3453       57606 :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3454             :   HeapObject object = page->GetObject();
    3455             :   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    3456             :       heap()->GCFlagsForIncrementalMarking(),
    3457       57606 :       kGCCallbackScheduleIdleGarbageCollection);
    3458       57606 :   if (heap()->incremental_marking()->black_allocation()) {
    3459             :     heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
    3460             :   }
    3461             :   DCHECK_IMPLIES(
    3462             :       heap()->incremental_marking()->black_allocation(),
    3463             :       heap()->incremental_marking()->marking_state()->IsBlack(object));
    3464             :   page->InitializationMemoryFence();
    3465       57606 :   return object;
    3466             : }
    3467             : 
    3468       57606 : LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
    3469             :                                                Executability executable) {
    3470             :   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
    3471      115212 :       object_size, this, executable);
    3472       57606 :   if (page == nullptr) return nullptr;
    3473             :   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
    3474             : 
    3475       57606 :   Register(page, object_size);
    3476             : 
    3477             :   HeapObject object = page->GetObject();
    3478             : 
    3479             :   heap()->CreateFillerObjectAt(object->address(), object_size,
    3480       57606 :                                ClearRecordedSlots::kNo);
    3481       57606 :   AllocationStep(object_size, object->address(), object_size);
    3482       57606 :   return page;
    3483             : }
    3484             : 
    3485             : 
    3486         753 : size_t LargeObjectSpace::CommittedPhysicalMemory() {
    3487             :   // On a platform that provides lazy committing of memory, we over-account
    3488             :   // the actually committed memory. There is no easy way right now to support
    3489             :   // precise accounting of committed memory in large object space.
    3490         753 :   return CommittedMemory();
    3491             : }
    3492             : 
    3493             : 
    3494             : // GC support
    3495           5 : Object LargeObjectSpace::FindObject(Address a) {
    3496           5 :   LargePage* page = FindPage(a);
    3497           5 :   if (page != nullptr) {
    3498           5 :     return page->GetObject();
    3499             :   }
    3500           0 :   return Smi::kZero;  // Signaling not found.
    3501             : }
    3502             : 
    3503      521480 : LargePage* LargeObjectSpace::FindPage(Address a) {
    3504      521480 :   const Address key = MemoryChunk::FromAddress(a)->address();
    3505             :   auto it = chunk_map_.find(key);
    3506      521480 :   if (it != chunk_map_.end()) {
    3507          15 :     LargePage* page = it->second;
    3508          15 :     if (page->Contains(a)) {
    3509          15 :       return page;
    3510             :     }
    3511             :   }
    3512             :   return nullptr;
    3513             : }
    3514             : 
    3515             : 
    3516      166984 : void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
    3517             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    3518             :       heap()->incremental_marking()->non_atomic_marking_state();
    3519             :   LargeObjectIterator it(this);
    3520      390164 :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    3521       56196 :     if (marking_state->IsBlackOrGrey(obj)) {
    3522             :       Marking::MarkWhite(marking_state->MarkBitFrom(obj));
    3523             :       MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    3524       56196 :       RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
    3525       56196 :       chunk->ResetProgressBar();
    3526             :       marking_state->SetLiveBytes(chunk, 0);
    3527             :     }
    3528             :     DCHECK(marking_state->IsWhite(obj));
    3529             :   }
    3530      166984 : }
    3531             : 
    3532       57606 : void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
    3533             :   // There may be concurrent access to the chunk map, so we have to take the
    3534             :   // lock here.
    3535       57606 :   base::MutexGuard guard(&chunk_map_mutex_);
    3536      493438 :   for (Address current = reinterpret_cast<Address>(page);
    3537      246719 :        current < reinterpret_cast<Address>(page) + page->size();
    3538             :        current += MemoryChunk::kPageSize) {
    3539      189113 :     chunk_map_[current] = page;
    3540             :   }
    3541       57606 : }
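                     : 
                     : // The chunk map keys every kPageSize-aligned address covered by a large
                     : // page back to that page, which is what makes FindPage() above a single
                     : // hash lookup. A sketch with an illustrative alignment constant, not
                     : // necessarily MemoryChunk::kPageSize:
                     : #include <cstddef>
                     : #include <cstdint>
                     : #include <unordered_map>
                     : 
                     : using SketchAddress = std::uintptr_t;
                     : constexpr SketchAddress kAlignSketch = 512 * 1024;
                     : 
                     : inline void InsertEntriesSketch(
                     :     std::unordered_map<SketchAddress, void*>* chunk_map,
                     :     SketchAddress page_start, std::size_t page_size, void* page) {
                     :   for (SketchAddress a = page_start; a < page_start + page_size;
                     :        a += kAlignSketch) {
                     :     (*chunk_map)[a] = page;
                     :   }
                     : }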
    3542             : 
    3543           0 : void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
    3544        9788 :   RemoveChunkMapEntries(page, page->address());
    3545           0 : }
    3546             : 
    3547        4945 : void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
    3548             :                                              Address free_start) {
    3549      129636 :   for (Address current = ::RoundUp(free_start, MemoryChunk::kPageSize);
    3550       64818 :        current < reinterpret_cast<Address>(page) + page->size();
    3551             :        current += MemoryChunk::kPageSize) {
    3552             :     chunk_map_.erase(current);
    3553             :   }
    3554        4945 : }
    3555             : 
    3556           0 : void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
    3557             :   DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
    3558             :   DCHECK(page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    3559             :   DCHECK(!page->IsFlagSet(MemoryChunk::IN_TO_SPACE));
    3560           0 :   size_t object_size = static_cast<size_t>(page->GetObject()->Size());
    3561             :   reinterpret_cast<NewLargeObjectSpace*>(page->owner())
    3562           0 :       ->Unregister(page, object_size);
    3563           0 :   Register(page, object_size);
    3564             :   page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
    3565           0 :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3566           0 :   page->set_owner(this);
    3567           0 : }
    3568             : 
    3569       57606 : void LargeObjectSpace::Register(LargePage* page, size_t object_size) {
    3570       57606 :   size_ += static_cast<int>(page->size());
    3571             :   AccountCommitted(page->size());
    3572       57606 :   objects_size_ += object_size;
    3573       57606 :   page_count_++;
    3574             :   memory_chunk_list_.PushBack(page);
    3575             : 
    3576       57606 :   InsertChunkMapEntries(page);
    3577       57606 : }
    3578             : 
    3579           0 : void LargeObjectSpace::Unregister(LargePage* page, size_t object_size) {
    3580           0 :   size_ -= static_cast<int>(page->size());
    3581             :   AccountUncommitted(page->size());
    3582           0 :   objects_size_ -= object_size;
    3583           0 :   page_count_--;
    3584           0 :   memory_chunk_list_.Remove(page);
    3585             : 
    3586             :   RemoveChunkMapEntries(page);
    3587           0 : }
    3588             : 
    3589      250476 : void LargeObjectSpace::FreeUnmarkedObjects() {
    3590             :   LargePage* current = first_page();
    3591             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    3592        4945 :       heap()->incremental_marking()->non_atomic_marking_state();
    3593             :   // Right-trimming does not update the objects_size_ counter, so we update
    3594             :   // it lazily after every GC.
    3595      250476 :   objects_size_ = 0;
    3596      562042 :   while (current) {
    3597             :     LargePage* next_current = current->next_page();
    3598       61090 :     HeapObject object = current->GetObject();
    3599             :     DCHECK(!marking_state->IsGrey(object));
    3600       61090 :     if (marking_state->IsBlack(object)) {
    3601             :       Address free_start;
    3602       56196 :       size_t size = static_cast<size_t>(object->Size());
    3603       56196 :       objects_size_ += size;
    3604       56196 :       if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
    3605             :           0) {
    3606             :         DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
    3607          51 :         current->ClearOutOfLiveRangeSlots(free_start);
    3608          51 :         RemoveChunkMapEntries(current, free_start);
    3609             :         const size_t bytes_to_free =
    3610        9890 :             current->size() - (free_start - current->address());
    3611             :         heap()->memory_allocator()->PartialFreeMemory(
    3612             :             current, free_start, bytes_to_free,
    3613         102 :             current->area_start() + object->Size());
    3614          51 :         size_ -= bytes_to_free;
    3615             :         AccountUncommitted(bytes_to_free);
    3616             :       }
    3617             :     } else {
    3618        4894 :       memory_chunk_list_.Remove(current);
    3619             : 
    3620             :       // Free the chunk.
    3621        4894 :       size_ -= static_cast<int>(current->size());
    3622             :       AccountUncommitted(current->size());
    3623        4894 :       page_count_--;
    3624             : 
    3625             :       RemoveChunkMapEntries(current);
    3626             :       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
    3627        4894 :           current);
    3628             :     }
    3629             :     current = next_current;
    3630             :   }
    3631      250476 : }
    3632             : 
    3633        3374 : bool LargeObjectSpace::Contains(HeapObject object) {
    3634             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    3635             : 
    3636        3374 :   bool owned = (chunk->owner() == this);
    3637             : 
    3638             :   SLOW_DCHECK(!owned || FindObject(object->address())->IsHeapObject());
    3639             : 
    3640        3374 :   return owned;
    3641             : }
    3642             : 
    3643       23559 : std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
    3644       23559 :   return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
    3645             : }
    3646             : 
    3647             : #ifdef VERIFY_HEAP
    3648             : // We do not assume that the large object iterator works, because it depends
    3649             : // on the invariants we are checking during verification.
    3650             : void LargeObjectSpace::Verify(Isolate* isolate) {
    3651             :   size_t external_backing_store_bytes[kNumTypes];
    3652             : 
    3653             :   for (int i = 0; i < kNumTypes; i++) {
    3654             :     external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    3655             :   }
    3656             : 
    3657             :   for (LargePage* chunk = first_page(); chunk != nullptr;
    3658             :        chunk = chunk->next_page()) {
    3659             :     // Each chunk contains an object that starts at the large object page's
    3660             :     // object area start.
    3661             :     HeapObject object = chunk->GetObject();
    3662             :     Page* page = Page::FromHeapObject(object);
    3663             :     CHECK(object->address() == page->area_start());
    3664             : 
    3665             :     // The first word should be a map, and we expect all map pointers to be
    3666             :     // in map space or read-only space.
    3667             :     Map map = object->map();
    3668             :     CHECK(map->IsMap());
    3669             :     CHECK(heap()->map_space()->Contains(map) ||
    3670             :           heap()->read_only_space()->Contains(map));
    3671             : 
    3672             :     // We have only the following types in the large object space:
    3673             :     CHECK(object->IsAbstractCode() || object->IsSeqString() ||
    3674             :           object->IsExternalString() || object->IsThinString() ||
    3675             :           object->IsFixedArray() || object->IsFixedDoubleArray() ||
    3676             :           object->IsWeakFixedArray() || object->IsWeakArrayList() ||
    3677             :           object->IsPropertyArray() || object->IsByteArray() ||
    3678             :           object->IsFeedbackVector() || object->IsBigInt() ||
    3679             :           object->IsFreeSpace() || object->IsFeedbackMetadata() ||
    3680             :           object->IsContext() || object->IsUncompiledDataWithoutPreparseData());
    3681             : 
    3682             :     // The object itself should look OK.
    3683             :     object->ObjectVerify(isolate);
    3684             : 
    3685             :     if (!FLAG_verify_heap_skip_remembered_set) {
    3686             :       heap()->VerifyRememberedSetFor(object);
    3687             :     }
    3688             : 
    3689             :     // Byte arrays and strings don't have interior pointers.
    3690             :     if (object->IsAbstractCode()) {
    3691             :       VerifyPointersVisitor code_visitor(heap());
    3692             :       object->IterateBody(map, object->Size(), &code_visitor);
    3693             :     } else if (object->IsFixedArray()) {
    3694             :       FixedArray array = FixedArray::cast(object);
    3695             :       for (int j = 0; j < array->length(); j++) {
    3696             :         Object element = array->get(j);
    3697             :         if (element->IsHeapObject()) {
    3698             :           HeapObject element_object = HeapObject::cast(element);
    3699             :           CHECK(heap()->Contains(element_object));
    3700             :           CHECK(element_object->map()->IsMap());
    3701             :         }
    3702             :       }
    3703             :     } else if (object->IsPropertyArray()) {
    3704             :       PropertyArray array = PropertyArray::cast(object);
    3705             :       for (int j = 0; j < array->length(); j++) {
    3706             :         Object property = array->get(j);
    3707             :         if (property->IsHeapObject()) {
    3708             :           HeapObject property_object = HeapObject::cast(property);
    3709             :           CHECK(heap()->Contains(property_object));
    3710             :           CHECK(property_object->map()->IsMap());
    3711             :         }
    3712             :       }
    3713             :     }
    3714             :     for (int i = 0; i < kNumTypes; i++) {
    3715             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    3716             :       external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    3717             :     }
    3718             :   }
    3719             :   for (int i = 0; i < kNumTypes; i++) {
    3720             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    3721             :     CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
    3722             :   }
    3723             : }
    3724             : #endif
    3725             : 
    3726             : #ifdef DEBUG
    3727             : void LargeObjectSpace::Print() {
    3728             :   StdoutStream os;
    3729             :   LargeObjectIterator it(this);
    3730             :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    3731             :     obj->Print(os);
    3732             :   }
    3733             : }
    3734             : 
    3735             : void Page::Print() {
    3736             :   // Make a best effort to print the objects in the page.
    3737             :   PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
    3738             :          this->owner()->name());
    3739             :   printf(" --------------------------------------\n");
    3740             :   HeapObjectIterator objects(this);
    3741             :   unsigned mark_size = 0;
    3742             :   for (HeapObject object = objects.Next(); !object.is_null();
    3743             :        object = objects.Next()) {
    3744             :     bool is_marked =
    3745             :         heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
    3746             :     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    3747             :     if (is_marked) {
    3748             :       mark_size += object->Size();
    3749             :     }
    3750             :     object->ShortPrint();
    3751             :     PrintF("\n");
    3752             :   }
    3753             :   printf(" --------------------------------------\n");
    3754             :   printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
    3755             :          heap()->incremental_marking()->marking_state()->live_bytes(this));
    3756             : }
    3757             : 
    3758             : #endif  // DEBUG
    3759             : 
    3760       62883 : NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
    3761       62883 :     : LargeObjectSpace(heap, NEW_LO_SPACE) {}
    3762             : 
    3763           0 : AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
    3764             :   // TODO(hpayer): Add heap growing strategy here.
    3765           0 :   LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
    3766           0 :   if (page == nullptr) return AllocationResult::Retry(identity());
    3767           0 :   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3768             :   page->SetFlag(MemoryChunk::IN_TO_SPACE);
    3769             :   page->InitializationMemoryFence();
    3770           0 :   return page->GetObject();
    3771             : }
    3772             : 
    3773         312 : size_t NewLargeObjectSpace::Available() {
    3774             :   // TODO(hpayer): Update as soon as we have a growing strategy.
    3775         312 :   return 0;
    3776             : }
    3777             : 
    3778      107086 : void NewLargeObjectSpace::Flip() {
    3779      214172 :   for (LargePage* chunk = first_page(); chunk != nullptr;
    3780             :        chunk = chunk->next_page()) {
    3781             :     chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
    3782             :     chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
    3783             :   }
    3784      107086 : }
    3785             : 
    3786       23594 : void NewLargeObjectSpace::FreeAllObjects() {
    3787             :   LargePage* current = first_page();
    3788       47188 :   while (current) {
    3789             :     LargePage* next_current = current->next_page();
    3790           0 :     Unregister(current, static_cast<size_t>(current->GetObject()->Size()));
    3791             :     heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
    3792           0 :         current);
    3793             :     current = next_current;
    3794             :   }
    3795             :   // Right-trimming does not update the objects_size_ counter, so we update
    3796             :   // it lazily after every GC.
    3797       23594 :   objects_size_ = 0;
    3798       23594 : }
    3799             : 
    3800       62883 : CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    3801       62883 :     : LargeObjectSpace(heap, CODE_LO_SPACE) {}
    3802             : 
    3803       40183 : AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
    3804       40183 :   return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
    3805             : }
    3806             : 
    3807             : }  // namespace internal
    3808      183867 : }  // namespace v8

Generated by: LCOV version 1.10