LCOV - code coverage report
Current view: top level - src/heap - spaces.cc
Test: app.info
Date: 2019-03-21
                 Hit     Total   Coverage
Lines:           1187    1337    88.8 %
Functions:       209     250     83.6 %

          Line data    Source code
       1             : // Copyright 2011 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/spaces.h"
       6             : 
       7             : #include <utility>
       8             : 
       9             : #include "src/base/bits.h"
      10             : #include "src/base/macros.h"
      11             : #include "src/base/platform/semaphore.h"
      12             : #include "src/base/template-utils.h"
      13             : #include "src/counters.h"
      14             : #include "src/heap/array-buffer-tracker.h"
      15             : #include "src/heap/concurrent-marking.h"
      16             : #include "src/heap/gc-tracer.h"
      17             : #include "src/heap/heap-controller.h"
      18             : #include "src/heap/incremental-marking-inl.h"
      19             : #include "src/heap/mark-compact.h"
      20             : #include "src/heap/remembered-set.h"
      21             : #include "src/heap/slot-set.h"
      22             : #include "src/heap/sweeper.h"
      23             : #include "src/msan.h"
      24             : #include "src/objects-inl.h"
      25             : #include "src/objects/free-space-inl.h"
      26             : #include "src/objects/js-array-buffer-inl.h"
      27             : #include "src/objects/js-array-inl.h"
      28             : #include "src/ostreams.h"
      29             : #include "src/snapshot/snapshot.h"
      30             : #include "src/v8.h"
      31             : #include "src/vm-state-inl.h"
      32             : 
      33             : namespace v8 {
      34             : namespace internal {
      35             : 
       36             : // These checks are here to ensure that the lower 32 bits of any real heap
       37             : // object can't overlap with the lower 32 bits of the cleared weak reference
       38             : // value, and that it is therefore enough to compare only the lower 32 bits
       39             : // of a MaybeObject in order to figure out if it's a cleared weak reference.
      40             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
      41             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
      42             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
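
A minimal sketch of the comparison these asserts make safe (the helper name and
signature are illustrative, not part of spaces.cc):

    bool IsClearedWeakReference(uint32_t maybe_object_lower32) {
      // Real heap objects never have these lower 32 bits: the asserts above
      // keep kClearedWeakHeapObjectLower32 below the page header size, where
      // no object can start.
      return maybe_object_lower32 == kClearedWeakHeapObjectLower32;
    }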
      43             : 
      44             : // ----------------------------------------------------------------------------
      45             : // HeapObjectIterator
      46             : 
      47           6 : HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
      48             :     : cur_addr_(kNullAddress),
      49             :       cur_end_(kNullAddress),
      50             :       space_(space),
      51             :       page_range_(space->first_page(), nullptr),
      52       31292 :       current_page_(page_range_.begin()) {}
      53             : 
      54           0 : HeapObjectIterator::HeapObjectIterator(Page* page)
      55             :     : cur_addr_(kNullAddress),
      56             :       cur_end_(kNullAddress),
      57             :       space_(reinterpret_cast<PagedSpace*>(page->owner())),
      58             :       page_range_(page),
      59         896 :       current_page_(page_range_.begin()) {
      60             : #ifdef DEBUG
      61             :   Space* owner = page->owner();
      62             :   DCHECK(owner == page->heap()->old_space() ||
      63             :          owner == page->heap()->map_space() ||
      64             :          owner == page->heap()->code_space() ||
      65             :          owner == page->heap()->read_only_space());
      66             : #endif  // DEBUG
      67           0 : }
      68             : 
       69             : // We have hit the end of the current page's object area and should advance
       70             : // to the next page in the iteration range.
      71       85641 : bool HeapObjectIterator::AdvanceToNextPage() {
      72             :   DCHECK_EQ(cur_addr_, cur_end_);
      73       85641 :   if (current_page_ == page_range_.end()) return false;
      74             :   Page* cur_page = *(current_page_++);
      75       53909 :   Heap* heap = space_->heap();
      76             : 
      77       53909 :   heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
      78             : #ifdef ENABLE_MINOR_MC
      79       53909 :   if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
      80             :     heap->minor_mark_compact_collector()->MakeIterable(
      81             :         cur_page, MarkingTreatmentMode::CLEAR,
      82           0 :         FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
      83             : #else
      84             :   DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
      85             : #endif  // ENABLE_MINOR_MC
      86       53909 :   cur_addr_ = cur_page->area_start();
      87       53909 :   cur_end_ = cur_page->area_end();
      88             :   DCHECK(cur_page->SweepingDone());
      89       53909 :   return true;
      90             : }
      91             : 
      92       96968 : PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
      93       96968 :     : heap_(heap) {
      94             :   DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
      95             : 
      96      872712 :   for (SpaceIterator it(heap_); it.has_next();) {
      97      775744 :     it.next()->PauseAllocationObservers();
      98             :   }
      99       96968 : }
     100             : 
     101      193936 : PauseAllocationObserversScope::~PauseAllocationObserversScope() {
     102      872712 :   for (SpaceIterator it(heap_); it.has_next();) {
     103      775744 :     it.next()->ResumeAllocationObservers();
     104             :   }
     105       96968 : }
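
Illustrative usage of the scope above (assuming a Heap* heap is in scope; this
snippet is not part of spaces.cc): observers are paused by the constructor and
resumed automatically by the destructor.

    {
      PauseAllocationObserversScope pause_observers(heap);
      // Work done here does not notify allocation observers on any space.
    }
    // The destructor has run; allocation observers are active again.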
     106             : 
     107             : static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
     108             :     LAZY_INSTANCE_INITIALIZER;
     109             : 
     110       62554 : Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
     111       62554 :   base::MutexGuard guard(&mutex_);
     112             :   auto it = recently_freed_.find(code_range_size);
     113       62555 :   if (it == recently_freed_.end() || it->second.empty()) {
     114       60401 :     return reinterpret_cast<Address>(GetRandomMmapAddr());
     115             :   }
     116        2154 :   Address result = it->second.back();
     117             :   it->second.pop_back();
     118        2154 :   return result;
     119             : }
     120             : 
     121       62536 : void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
     122             :                                                 size_t code_range_size) {
     123       62536 :   base::MutexGuard guard(&mutex_);
     124       62537 :   recently_freed_[code_range_size].push_back(code_range_start);
     125       62537 : }
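
A sketch of how the two methods above cooperate (hypothetical variables
freed_start and code_range_size; not code from spaces.cc): a freed code range
is remembered keyed by its size, and the next request for a range of the same
size is hinted back to the same address.

    CodeRangeAddressHint hints;
    hints.NotifyFreedCodeRange(freed_start, code_range_size);
    Address hint = hints.GetAddressHint(code_range_size);
    // hint == freed_start; had nothing of this size been freed, a random
    // mmap address would have been returned instead.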
     126             : 
     127             : // -----------------------------------------------------------------------------
     128             : // MemoryAllocator
     129             : //
     130             : 
     131       62548 : MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
     132             :                                  size_t code_range_size)
     133             :     : isolate_(isolate),
     134       62548 :       data_page_allocator_(isolate->page_allocator()),
     135             :       code_page_allocator_(nullptr),
     136             :       capacity_(RoundUp(capacity, Page::kPageSize)),
     137             :       size_(0),
     138             :       size_executable_(0),
     139             :       lowest_ever_allocated_(static_cast<Address>(-1ll)),
     140             :       highest_ever_allocated_(kNullAddress),
     141      250189 :       unmapper_(isolate->heap(), this) {
     142       62549 :   InitializeCodePageAllocator(data_page_allocator_, code_range_size);
     143       62549 : }
     144             : 
     145       62548 : void MemoryAllocator::InitializeCodePageAllocator(
     146             :     v8::PageAllocator* page_allocator, size_t requested) {
     147             :   DCHECK_NULL(code_page_allocator_instance_.get());
     148             : 
     149       62548 :   code_page_allocator_ = page_allocator;
     150             : 
     151       62548 :   if (requested == 0) {
     152             :     if (!kRequiresCodeRange) return;
     153             :     // When a target requires the code range feature, we put all code objects
     154             :     // in a kMaximalCodeRangeSize range of virtual address space, so that
     155             :     // they can call each other with near calls.
     156             :     requested = kMaximalCodeRangeSize;
     157           1 :   } else if (requested <= kMinimumCodeRangeSize) {
     158             :     requested = kMinimumCodeRangeSize;
     159             :   }
     160             : 
     161             :   const size_t reserved_area =
     162             :       kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
     163             :   if (requested < (kMaximalCodeRangeSize - reserved_area)) {
     164             :     requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
     165             :     // Fullfilling both reserved pages requirement and huge code area
     166             :     // alignments is not supported (requires re-implementation).
     167             :     DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
     168             :   }
     169             :   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
     170             : 
     171             :   Address hint =
     172      125095 :       RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
     173       62549 :                 page_allocator->AllocatePageSize());
     174             :   VirtualMemory reservation(
     175             :       page_allocator, requested, reinterpret_cast<void*>(hint),
     176      187647 :       Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
     177       62549 :   if (!reservation.IsReserved()) {
     178           0 :     V8::FatalProcessOutOfMemory(isolate_,
     179           0 :                                 "CodeRange setup: allocate virtual memory");
     180             :   }
     181       62549 :   code_range_ = reservation.region();
     182             : 
     183             :   // We are sure that we have mapped a block of requested addresses.
     184             :   DCHECK_GE(reservation.size(), requested);
     185             :   Address base = reservation.address();
     186             : 
     187             :   // On some platforms, specifically Win64, we need to reserve some pages at
     188             :   // the beginning of an executable space. See
     189             :   //   https://cs.chromium.org/chromium/src/components/crash/content/
     190             :   //     app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
     191             :   // for details.
     192             :   if (reserved_area > 0) {
     193             :     if (!reservation.SetPermissions(base, reserved_area,
     194             :                                     PageAllocator::kReadWrite))
     195             :       V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
     196             : 
     197             :     base += reserved_area;
     198             :   }
     199       62549 :   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
     200             :   size_t size =
     201       62549 :       RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
     202       62549 :                 MemoryChunk::kPageSize);
     203             :   DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
     204             : 
     205       62549 :   LOG(isolate_,
     206             :       NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
     207             :                requested));
     208             : 
     209       62549 :   heap_reservation_.TakeControl(&reservation);
     210      125098 :   code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
     211             :       page_allocator, aligned_base, size,
     212             :       static_cast<size_t>(MemoryChunk::kAlignment));
     213       62549 :   code_page_allocator_ = code_page_allocator_instance_.get();
     214             : }
     215             : 
     216       62534 : void MemoryAllocator::TearDown() {
     217       62534 :   unmapper()->TearDown();
     218             : 
     219             :   // Check that spaces were torn down before MemoryAllocator.
     220             :   DCHECK_EQ(size_, 0u);
     221             :   // TODO(gc) this will be true again when we fix FreeMemory.
     222             :   // DCHECK_EQ(0, size_executable_);
     223       62533 :   capacity_ = 0;
     224             : 
     225       62533 :   if (last_chunk_.IsReserved()) {
     226           0 :     last_chunk_.Free();
     227             :   }
     228             : 
     229       62533 :   if (code_page_allocator_instance_.get()) {
     230             :     DCHECK(!code_range_.is_empty());
     231             :     code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
     232       62534 :                                                             code_range_.size());
     233       62534 :     code_range_ = base::AddressRegion();
     234             :     code_page_allocator_instance_.reset();
     235             :   }
     236       62534 :   code_page_allocator_ = nullptr;
     237       62534 :   data_page_allocator_ = nullptr;
     238       62534 : }
     239             : 
     240      344406 : class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
     241             :  public:
     242             :   explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
     243             :       : CancelableTask(isolate),
     244             :         unmapper_(unmapper),
     245      344682 :         tracer_(isolate->heap()->tracer()) {}
     246             : 
     247             :  private:
     248      168079 :   void RunInternal() override {
     249      672311 :     TRACE_BACKGROUND_GC(tracer_,
     250             :                         GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
     251      168030 :     unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     252      168177 :     unmapper_->active_unmapping_tasks_--;
     253      168177 :     unmapper_->pending_unmapping_tasks_semaphore_.Signal();
     254      168207 :     if (FLAG_trace_unmapper) {
     255           0 :       PrintIsolate(unmapper_->heap_->isolate(),
     256           0 :                    "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
     257             :     }
     258      168219 :   }
     259             : 
     260             :   Unmapper* const unmapper_;
     261             :   GCTracer* const tracer_;
     262             :   DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
     263             : };
     264             : 
     265      249769 : void MemoryAllocator::Unmapper::FreeQueuedChunks() {
     266      249769 :   if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
     267      173262 :     if (!MakeRoomForNewTasks()) {
     268             :       // kMaxUnmapperTasks are already running. Avoid creating any more.
     269         921 :       if (FLAG_trace_unmapper) {
     270           0 :         PrintIsolate(heap_->isolate(),
     271             :                      "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
     272           0 :                      kMaxUnmapperTasks);
     273             :       }
     274         921 :       return;
     275             :     }
     276      344682 :     auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
     277      172341 :     if (FLAG_trace_unmapper) {
     278           0 :       PrintIsolate(heap_->isolate(),
     279             :                    "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
     280           0 :                    task->id());
     281             :     }
     282             :     DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
     283             :     DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
     284             :     DCHECK_GE(active_unmapping_tasks_, 0);
     285             :     active_unmapping_tasks_++;
     286      344682 :     task_ids_[pending_unmapping_tasks_++] = task->id();
     287      517023 :     V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
     288             :   } else {
     289       76507 :     PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     290             :   }
     291             : }
     292             : 
     293      166863 : void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
     294      511545 :   for (int i = 0; i < pending_unmapping_tasks_; i++) {
     295      344682 :     if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
     296             :         TryAbortResult::kTaskAborted) {
     297      168226 :       pending_unmapping_tasks_semaphore_.Wait();
     298             :     }
     299             :   }
     300      166863 :   pending_unmapping_tasks_ = 0;
     301             :   active_unmapping_tasks_ = 0;
     302             : 
     303      166863 :   if (FLAG_trace_unmapper) {
     304             :     PrintIsolate(
     305           0 :         heap_->isolate(),
     306           0 :         "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
     307             :   }
     308      166863 : }
     309             : 
     310       94928 : void MemoryAllocator::Unmapper::PrepareForGC() {
     311             :   // Free non-regular chunks because they cannot be re-used.
     312       94928 :   PerformFreeMemoryOnQueuedNonRegularChunks();
     313       94928 : }
     314             : 
     315       70319 : void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
     316       70319 :   CancelAndWaitForPendingTasks();
     317       70320 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     318       70319 : }
     319             : 
     320      173262 : bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
     321             :   DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
     322             : 
     323      173262 :   if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
     324             :     // All previous unmapping tasks have been run to completion.
     325             :     // Finalize those tasks to make room for new ones.
     326       96544 :     CancelAndWaitForPendingTasks();
     327             :   }
     328      173262 :   return pending_unmapping_tasks_ != kMaxUnmapperTasks;
     329             : }
     330             : 
     331      472459 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
     332             :   MemoryChunk* chunk = nullptr;
     333      494831 :   while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
     334       11186 :     allocator_->PerformFreeMemory(chunk);
     335             :   }
     336      472509 : }
     337             : 
     338             : template <MemoryAllocator::Unmapper::FreeMode mode>
     339      377223 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     340             :   MemoryChunk* chunk = nullptr;
     341      377223 :   if (FLAG_trace_unmapper) {
     342           0 :     PrintIsolate(
     343           0 :         heap_->isolate(),
     344             :         "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
     345             :         NumberOfChunks());
     346             :   }
     347             :   // Regular chunks.
     348      794194 :   while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
     349             :     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
     350      416884 :     allocator_->PerformFreeMemory(chunk);
     351      416865 :     if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
     352             :   }
     353             :   if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
     354             :     // The previous loop uncommitted any pages marked as pooled and added them
     355             :     // to the pooled list. In case of kReleasePooled we need to free them
     356             :     // though.
     357      372550 :     while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
     358      372553 :       allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
     359             :     }
     360             :   }
     361      377587 :   PerformFreeMemoryOnQueuedNonRegularChunks();
     362      377574 : }
     363             : 
     364       62536 : void MemoryAllocator::Unmapper::TearDown() {
     365       62536 :   CHECK_EQ(0, pending_unmapping_tasks_);
     366       62536 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     367             :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     368             :     DCHECK(chunks_[i].empty());
     369             :   }
     370       62535 : }
     371             : 
     372           0 : size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
     373           0 :   base::MutexGuard guard(&mutex_);
     374           0 :   return chunks_[kRegular].size() + chunks_[kNonRegular].size();
     375             : }
     376             : 
     377           5 : int MemoryAllocator::Unmapper::NumberOfChunks() {
     378           5 :   base::MutexGuard guard(&mutex_);
     379             :   size_t result = 0;
     380          35 :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     381          30 :     result += chunks_[i].size();
     382             :   }
     383          10 :   return static_cast<int>(result);
     384             : }
     385             : 
     386           0 : size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
     387           0 :   base::MutexGuard guard(&mutex_);
     388             : 
     389             :   size_t sum = 0;
      390             :   // kPooled chunks are already uncommitted. We only have to account for
     391             :   // kRegular and kNonRegular chunks.
     392           0 :   for (auto& chunk : chunks_[kRegular]) {
     393           0 :     sum += chunk->size();
     394             :   }
     395           0 :   for (auto& chunk : chunks_[kNonRegular]) {
     396           0 :     sum += chunk->size();
     397             :   }
     398           0 :   return sum;
     399             : }
     400             : 
     401       58876 : bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
     402             :   Address base = reservation->address();
     403             :   size_t size = reservation->size();
     404       58876 :   if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
     405             :     return false;
     406             :   }
     407       58876 :   UpdateAllocatedSpaceLimits(base, base + size);
     408      117752 :   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
     409       58876 :   return true;
     410             : }
     411             : 
     412      408890 : bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
     413             :   size_t size = reservation->size();
     414      408890 :   if (!reservation->SetPermissions(reservation->address(), size,
     415             :                                    PageAllocator::kNoAccess)) {
     416             :     return false;
     417             :   }
     418      817764 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
     419      408885 :   return true;
     420             : }
     421             : 
     422      434071 : void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
     423             :                                  Address base, size_t size) {
     424      434071 :   CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
     425      434071 : }
     426             : 
     427      869031 : Address MemoryAllocator::AllocateAlignedMemory(
     428             :     size_t reserve_size, size_t commit_size, size_t alignment,
     429             :     Executability executable, void* hint, VirtualMemory* controller) {
     430             :   v8::PageAllocator* page_allocator = this->page_allocator(executable);
     431             :   DCHECK(commit_size <= reserve_size);
     432     1738061 :   VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
     433      869029 :   if (!reservation.IsReserved()) return kNullAddress;
     434             :   Address base = reservation.address();
     435             :   size_ += reservation.size();
     436             : 
     437      869031 :   if (executable == EXECUTABLE) {
     438      129754 :     if (!CommitExecutableMemory(&reservation, base, commit_size,
     439             :                                 reserve_size)) {
     440             :       base = kNullAddress;
     441             :     }
     442             :   } else {
     443      739277 :     if (reservation.SetPermissions(base, commit_size,
     444             :                                    PageAllocator::kReadWrite)) {
     445      739278 :       UpdateAllocatedSpaceLimits(base, base + commit_size);
     446             :     } else {
     447             :       base = kNullAddress;
     448             :     }
     449             :   }
     450             : 
     451      869032 :   if (base == kNullAddress) {
     452             :     // Failed to commit the body. Free the mapping and any partially committed
     453             :     // regions inside it.
     454           0 :     reservation.Free();
     455             :     size_ -= reserve_size;
     456           0 :     return kNullAddress;
     457             :   }
     458             : 
     459      869032 :   controller->TakeControl(&reservation);
     460      869032 :   return base;
     461             : }
     462             : 
     463     6593787 : void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
     464             :   base::AddressRegion memory_area =
     465     6593787 :       MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
     466     6591015 :   if (memory_area.size() != 0) {
     467       59411 :     MemoryAllocator* memory_allocator = heap_->memory_allocator();
     468             :     v8::PageAllocator* page_allocator =
     469             :         memory_allocator->page_allocator(executable());
     470       59411 :     CHECK(page_allocator->DiscardSystemPages(
     471             :         reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
     472             :   }
     473     6590957 : }
     474             : 
     475           0 : size_t MemoryChunkLayout::CodePageGuardStartOffset() {
     476             :   // We are guarding code pages: the first OS page after the header
     477             :   // will be protected as non-writable.
     478           0 :   return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
     479             : }
     480             : 
     481         500 : size_t MemoryChunkLayout::CodePageGuardSize() {
     482     7213095 :   return MemoryAllocator::GetCommitPageSize();
     483             : }
     484             : 
     485     6953104 : intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
     486             :   // We are guarding code pages: the first OS page after the header
     487             :   // will be protected as non-writable.
     488     6953087 :   return CodePageGuardStartOffset() + CodePageGuardSize();
     489             : }
     490             : 
     491           0 : intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
     492             :   // We are guarding code pages: the last OS page will be protected as
     493             :   // non-writable.
     494      568787 :   return Page::kPageSize -
     495      568787 :          static_cast<int>(MemoryAllocator::GetCommitPageSize());
     496             : }
     497             : 
     498      568787 : size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
     499      568787 :   size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
     500             :   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
     501      568787 :   return memory;
     502             : }
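
Combining the helpers above, the allocatable area of a code page relates to the
page and commit-page sizes as follows (an identity derived from the functions
shown, not code from spaces.cc):

    AllocatableMemoryInCodePage()
        == ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage()
        == (Page::kPageSize - GetCommitPageSize())
           - (RoundUp(Page::kHeaderSize, GetCommitPageSize()) + GetCommitPageSize())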
     503             : 
     504           5 : intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
     505           5 :   return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
     506             : }
     507             : 
     508        1000 : size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
     509             :     AllocationSpace space) {
     510       59876 :   if (space == CODE_SPACE) {
     511         500 :     return ObjectStartOffsetInCodePage();
     512             :   }
     513             :   return ObjectStartOffsetInDataPage();
     514             : }
     515             : 
     516     1840986 : size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
     517             :   size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
     518             :   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
     519     1840986 :   return memory;
     520             : }
     521             : 
     522     1341339 : size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
     523             :     AllocationSpace space) {
     524     1809581 :   if (space == CODE_SPACE) {
     525      568787 :     return AllocatableMemoryInCodePage();
     526             :   }
     527             :   return AllocatableMemoryInDataPage();
     528             : }
     529             : 
     530           0 : Heap* MemoryChunk::synchronized_heap() {
     531             :   return reinterpret_cast<Heap*>(
     532           0 :       base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
     533             : }
     534             : 
     535           0 : void MemoryChunk::InitializationMemoryFence() {
     536             :   base::SeqCst_MemoryFence();
     537             : #ifdef THREAD_SANITIZER
     538             :   // Since TSAN does not process memory fences, we use the following annotation
     539             :   // to tell TSAN that there is no data race when emitting a
     540             :   // InitializationMemoryFence. Note that the other thread still needs to
     541             :   // perform MemoryChunk::synchronized_heap().
     542             :   base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
     543             :                       reinterpret_cast<base::AtomicWord>(heap_));
     544             : #endif
     545           0 : }
     546             : 
     547     3074583 : void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     548             :     PageAllocator::Permission permission) {
     549             :   DCHECK(permission == PageAllocator::kRead ||
     550             :          permission == PageAllocator::kReadExecute);
     551             :   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
     552             :   DCHECK(owner()->identity() == CODE_SPACE ||
     553             :          owner()->identity() == CODE_LO_SPACE);
     554             :   // Decrementing the write_unprotect_counter_ and changing the page
     555             :   // protection mode has to be atomic.
     556     3074583 :   base::MutexGuard guard(page_protection_change_mutex_);
     557     3074585 :   if (write_unprotect_counter_ == 0) {
     558             :     // This is a corner case that may happen when we have a
     559             :     // CodeSpaceMemoryModificationScope open and this page was newly
     560             :     // added.
     561             :     return;
     562             :   }
     563     3074585 :   write_unprotect_counter_--;
     564             :   DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
     565     3074585 :   if (write_unprotect_counter_ == 0) {
     566             :     Address protect_start =
     567     3028201 :         address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     568             :     size_t page_size = MemoryAllocator::GetCommitPageSize();
     569             :     DCHECK(IsAligned(protect_start, page_size));
     570             :     size_t protect_size = RoundUp(area_size(), page_size);
     571     3028198 :     CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
     572             :   }
     573             : }
     574             : 
     575       71923 : void MemoryChunk::SetReadable() {
     576       87121 :   DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
     577       71923 : }
     578             : 
     579     2561165 : void MemoryChunk::SetReadAndExecutable() {
     580             :   DCHECK(!FLAG_jitless);
     581             :   DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     582     2987463 :       PageAllocator::kReadExecute);
     583     2561165 : }
     584             : 
     585     3012727 : void MemoryChunk::SetReadAndWritable() {
     586             :   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
     587             :   DCHECK(owner()->identity() == CODE_SPACE ||
     588             :          owner()->identity() == CODE_LO_SPACE);
     589             :   // Incrementing the write_unprotect_counter_ and changing the page
     590             :   // protection mode has to be atomic.
     591     3012727 :   base::MutexGuard guard(page_protection_change_mutex_);
     592     3012739 :   write_unprotect_counter_++;
     593             :   DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
     594     3012739 :   if (write_unprotect_counter_ == 1) {
     595             :     Address unprotect_start =
     596     2966358 :         address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     597             :     size_t page_size = MemoryAllocator::GetCommitPageSize();
     598             :     DCHECK(IsAligned(unprotect_start, page_size));
     599             :     size_t unprotect_size = RoundUp(area_size(), page_size);
     600     2966355 :     CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
     601             :                                       PageAllocator::kReadWrite));
     602             :   }
     603     3012741 : }
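
An illustrative call sequence (not from spaces.cc) showing how
write_unprotect_counter_ makes nested unprotect scopes safe; permissions only
actually change on the 0 <-> 1 transitions:

    chunk->SetReadAndWritable();    // counter 0 -> 1: area becomes kReadWrite
    chunk->SetReadAndWritable();    // counter 1 -> 2: no permission change
    chunk->SetReadAndExecutable();  // counter 2 -> 1: still writable
    chunk->SetReadAndExecutable();  // counter 1 -> 0: area becomes kReadExecute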
     604             : 
     605             : namespace {
     606             : 
     607             : PageAllocator::Permission DefaultWritableCodePermissions() {
     608             :   return FLAG_jitless ? PageAllocator::kReadWrite
     609           0 :                       : PageAllocator::kReadWriteExecute;
     610             : }
     611             : 
     612             : }  // namespace
     613             : 
     614      927908 : MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
     615             :                                      Address area_start, Address area_end,
     616             :                                      Executability executable, Space* owner,
     617             :                                      VirtualMemory reservation) {
     618             :   MemoryChunk* chunk = FromAddress(base);
     619             : 
     620             :   DCHECK_EQ(base, chunk->address());
     621             : 
     622      927908 :   chunk->heap_ = heap;
     623      927908 :   chunk->size_ = size;
     624      927908 :   chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
     625             :   DCHECK(HasHeaderSentinel(area_start));
     626      927908 :   chunk->area_start_ = area_start;
     627      927908 :   chunk->area_end_ = area_end;
     628      927908 :   chunk->flags_ = Flags(NO_FLAGS);
     629             :   chunk->set_owner(owner);
     630             :   chunk->InitializeReservedMemory();
     631      927908 :   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
     632      927908 :   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
     633      927908 :   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
     634             :                                        nullptr);
     635      927908 :   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
     636             :                                        nullptr);
     637      927908 :   chunk->invalidated_slots_ = nullptr;
     638      927908 :   chunk->skip_list_ = nullptr;
     639             :   chunk->progress_bar_ = 0;
     640      927908 :   chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
     641             :   chunk->set_concurrent_sweeping_state(kSweepingDone);
     642      927908 :   chunk->page_protection_change_mutex_ = new base::Mutex();
     643      927907 :   chunk->write_unprotect_counter_ = 0;
     644      927907 :   chunk->mutex_ = new base::Mutex();
     645      927906 :   chunk->allocated_bytes_ = chunk->area_size();
     646      927906 :   chunk->wasted_memory_ = 0;
     647      927906 :   chunk->young_generation_bitmap_ = nullptr;
     648      927906 :   chunk->marking_bitmap_ = nullptr;
     649      927906 :   chunk->local_tracker_ = nullptr;
     650             : 
     651             :   chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
     652             :       0;
     653             :   chunk->external_backing_store_bytes_
     654             :       [ExternalBackingStoreType::kExternalString] = 0;
     655             : 
     656    12062730 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     657     5567412 :     chunk->categories_[i] = nullptr;
     658             :   }
     659             : 
     660             :   chunk->AllocateMarkingBitmap();
     661      927906 :   if (owner->identity() == RO_SPACE) {
     662             :     heap->incremental_marking()
     663             :         ->non_atomic_marking_state()
     664             :         ->bitmap(chunk)
     665             :         ->MarkAllBits();
     666             :   } else {
     667             :     heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
     668             :                                                                           0);
     669             :   }
     670             : 
     671             :   DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
     672             :   DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
     673             :   DCHECK_EQ(kOwnerOffset, OFFSET_OF(MemoryChunk, owner_));
     674             : 
     675      927906 :   if (executable == EXECUTABLE) {
     676             :     chunk->SetFlag(IS_EXECUTABLE);
     677      129753 :     if (heap->write_protect_code_memory()) {
     678             :       chunk->write_unprotect_counter_ =
     679      129753 :           heap->code_space_memory_modification_scope_depth();
     680             :     } else {
     681             :       size_t page_size = MemoryAllocator::GetCommitPageSize();
     682             :       DCHECK(IsAligned(area_start, page_size));
     683           0 :       size_t area_size = RoundUp(area_end - area_start, page_size);
     684           0 :       CHECK(reservation.SetPermissions(area_start, area_size,
     685             :                                        DefaultWritableCodePermissions()));
     686             :     }
     687             :   }
     688             : 
     689             :   chunk->reservation_ = std::move(reservation);
     690             : 
     691      927907 :   return chunk;
     692             : }
     693             : 
     694      425019 : Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
     695             :   Page* page = static_cast<Page*>(chunk);
     696             :   DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
     697             :                 page->owner()->identity()),
     698             :             page->area_size());
     699             :   // Make sure that categories are initialized before freeing the area.
     700             :   page->ResetAllocatedBytes();
     701             :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
     702      425019 :   page->AllocateFreeListCategories();
     703             :   page->InitializeFreeListCategories();
     704             :   page->list_node().Initialize();
     705             :   page->InitializationMemoryFence();
     706      425019 :   return page;
     707             : }
     708             : 
     709      432380 : Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
     710             :   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
     711             :   bool in_to_space = (id() != kFromSpace);
     712      432380 :   chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
     713             :   Page* page = static_cast<Page*>(chunk);
     714             :   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
     715      432380 :   page->AllocateLocalTracker();
     716             :   page->list_node().Initialize();
     717             : #ifdef ENABLE_MINOR_MC
     718      432380 :   if (FLAG_minor_mc) {
     719             :     page->AllocateYoungGenerationBitmap();
     720             :     heap()
     721             :         ->minor_mark_compact_collector()
     722             :         ->non_atomic_marking_state()
     723           0 :         ->ClearLiveness(page);
     724             :   }
     725             : #endif  // ENABLE_MINOR_MC
     726             :   page->InitializationMemoryFence();
     727      432380 :   return page;
     728             : }
     729             : 
     730       70444 : LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
     731             :                                  Executability executable) {
     732       70444 :   if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
     733             :     STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
     734           0 :     FATAL("Code page is too large.");
     735             :   }
     736             : 
     737             :   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
     738             : 
     739             :   // Initialize the sentinel value for each page boundary since the mutator
     740             :   // may initialize the object starting from its end.
     741             :   Address sentinel = chunk->address() + MemoryChunk::kHeaderSentinelOffset +
     742       70444 :                      MemoryChunk::kPageSize;
     743      331100 :   while (sentinel < chunk->area_end()) {
     744      130328 :     *reinterpret_cast<intptr_t*>(sentinel) = kNullAddress;
     745      130328 :     sentinel += MemoryChunk::kPageSize;
     746             :   }
     747             : 
     748             :   LargePage* page = static_cast<LargePage*>(chunk);
     749             :   page->SetFlag(MemoryChunk::LARGE_PAGE);
     750             :   page->list_node().Initialize();
     751       70444 :   return page;
     752             : }
     753             : 
     754      425017 : void Page::AllocateFreeListCategories() {
     755     5525249 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     756             :     categories_[i] = new FreeListCategory(
     757     5100229 :         reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
     758             :   }
     759      425020 : }
     760             : 
     761          91 : void Page::InitializeFreeListCategories() {
     762     5526430 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     763     2550660 :     categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
     764             :   }
     765          91 : }
     766             : 
     767           0 : void Page::ReleaseFreeListCategories() {
     768    11145303 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     769     5143984 :     if (categories_[i] != nullptr) {
     770     2549721 :       delete categories_[i];
     771     2549720 :       categories_[i] = nullptr;
     772             :     }
     773             :   }
     774           0 : }
     775             : 
     776         937 : Page* Page::ConvertNewToOld(Page* old_page) {
     777             :   DCHECK(old_page);
     778             :   DCHECK(old_page->InNewSpace());
     779             :   OldSpace* old_space = old_page->heap()->old_space();
     780             :   old_page->set_owner(old_space);
     781             :   old_page->SetFlags(0, static_cast<uintptr_t>(~0));
     782         937 :   Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
     783         937 :   old_space->AddPage(new_page);
     784         937 :   return new_page;
     785             : }
     786             : 
     787       20512 : size_t MemoryChunk::CommittedPhysicalMemory() {
     788       41024 :   if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
     789       17414 :     return size();
     790        3098 :   return high_water_mark_;
     791             : }
     792             : 
     793        7351 : bool MemoryChunk::InOldSpace() const {
     794        7351 :   return owner()->identity() == OLD_SPACE;
     795             : }
     796             : 
     797           0 : bool MemoryChunk::InLargeObjectSpace() const {
     798           0 :   return owner()->identity() == LO_SPACE;
     799             : }
     800             : 
     801      869028 : MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     802             :                                             size_t commit_area_size,
     803             :                                             Executability executable,
     804             :                                             Space* owner) {
     805             :   DCHECK_LE(commit_area_size, reserve_area_size);
     806             : 
     807             :   size_t chunk_size;
     808      869028 :   Heap* heap = isolate_->heap();
     809             :   Address base = kNullAddress;
     810     1738060 :   VirtualMemory reservation;
     811             :   Address area_start = kNullAddress;
     812             :   Address area_end = kNullAddress;
     813             :   void* address_hint =
     814             :       AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
     815             : 
     816             :   //
     817             :   // MemoryChunk layout:
     818             :   //
     819             :   //             Executable
     820             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     821             :   // |           Header           |
     822             :   // +----------------------------+<- base + CodePageGuardStartOffset
     823             :   // |           Guard            |
     824             :   // +----------------------------+<- area_start_
     825             :   // |           Area             |
     826             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     827             :   // |   Committed but not used   |
     828             :   // +----------------------------+<- aligned at OS page boundary
     829             :   // | Reserved but not committed |
     830             :   // +----------------------------+<- aligned at OS page boundary
     831             :   // |           Guard            |
     832             :   // +----------------------------+<- base + chunk_size
     833             :   //
     834             :   //           Non-executable
     835             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     836             :   // |          Header            |
      837             :   // +----------------------------+<- area_start_ (base + ObjectStartOffsetInDataPage)
     838             :   // |           Area             |
     839             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     840             :   // |  Committed but not used    |
     841             :   // +----------------------------+<- aligned at OS page boundary
     842             :   // | Reserved but not committed |
     843             :   // +----------------------------+<- base + chunk_size
     844             :   //
     845             : 
     846      869032 :   if (executable == EXECUTABLE) {
     847      259508 :     chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
     848             :                                reserve_area_size +
     849             :                                MemoryChunkLayout::CodePageGuardSize(),
     850             :                            GetCommitPageSize());
     851             : 
     852             :     // Size of header (not executable) plus area (executable).
     853      129754 :     size_t commit_size = ::RoundUp(
     854             :         MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
     855             :         GetCommitPageSize());
     856             :     base =
     857             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     858      129754 :                               executable, address_hint, &reservation);
     859      129754 :     if (base == kNullAddress) return nullptr;
     860             :     // Update executable memory size.
     861             :     size_executable_ += reservation.size();
     862             : 
     863             :     if (Heap::ShouldZapGarbage()) {
     864             :       ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
     865             :       ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
     866             :                commit_area_size, kZapValue);
     867             :     }
     868             : 
     869      129754 :     area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     870      129754 :     area_end = area_start + commit_area_size;
     871             :   } else {
     872      739278 :     chunk_size = ::RoundUp(
     873             :         MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
     874             :         GetCommitPageSize());
     875      739278 :     size_t commit_size = ::RoundUp(
     876             :         MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
     877             :         GetCommitPageSize());
     878             :     base =
     879             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     880      739278 :                               executable, address_hint, &reservation);
     881             : 
     882      739278 :     if (base == kNullAddress) return nullptr;
     883             : 
     884             :     if (Heap::ShouldZapGarbage()) {
     885             :       ZapBlock(
     886             :           base,
     887             :           MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
     888             :           kZapValue);
     889             :     }
     890             : 
     891      739278 :     area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
     892      739278 :     area_end = area_start + commit_area_size;
     893             :   }
     894             : 
     895             :   // Use chunk_size for statistics and callbacks because we assume that they
     896             :   // treat reserved but not-yet committed memory regions of chunks as allocated.
     897      869032 :   isolate_->counters()->memory_allocated()->Increment(
     898      869032 :       static_cast<int>(chunk_size));
     899             : 
     900      869032 :   LOG(isolate_,
     901             :       NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
     902             : 
     903             :   // We cannot use the last chunk in the address space because we would
     904             :   // overflow when comparing top and limit if this chunk is used for a
     905             :   // linear allocation area.
     906      869032 :   if ((base + chunk_size) == 0u) {
     907           0 :     CHECK(!last_chunk_.IsReserved());
     908           0 :     last_chunk_.TakeControl(&reservation);
     909           0 :     UncommitMemory(&last_chunk_);
     910             :     size_ -= chunk_size;
     911           0 :     if (executable == EXECUTABLE) {
     912             :       size_executable_ -= chunk_size;
     913             :     }
     914           0 :     CHECK(last_chunk_.IsReserved());
     915             :     return AllocateChunk(reserve_area_size, commit_area_size, executable,
     916           0 :                          owner);
     917             :   }
     918             : 
     919             :   MemoryChunk* chunk =
     920      869031 :       MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
     921      869032 :                               executable, owner, std::move(reservation));
     922             : 
     923      869031 :   if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
     924             :   return chunk;
     925             : }
     926             : 
     927      286680 : void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
     928      773271 :   if (is_marking) {
     929             :     SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     930             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     931             :     SetFlag(MemoryChunk::INCREMENTAL_MARKING);
     932             :   } else {
     933             :     ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     934             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     935             :     ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
     936             :   }
     937      286680 : }
     938             : 
     939      265435 : void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
     940             :   SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     941      711175 :   if (is_marking) {
     942             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     943             :     SetFlag(MemoryChunk::INCREMENTAL_MARKING);
     944             :   } else {
     945             :     ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     946             :     ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
     947             :   }
     948      265435 : }
     949             : 
     950     1366369 : void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
     951             : 
     952      445884 : void Page::AllocateLocalTracker() {
     953             :   DCHECK_NULL(local_tracker_);
     954      891768 :   local_tracker_ = new LocalArrayBufferTracker(this);
     955      445884 : }
     956             : 
     957       17802 : bool Page::contains_array_buffers() {
     958       17802 :   return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
     959             : }
     960             : 
     961           0 : void Page::ResetFreeListStatistics() {
     962      478591 :   wasted_memory_ = 0;
     963           0 : }
     964             : 
     965           0 : size_t Page::AvailableInFreeList() {
     966           0 :   size_t sum = 0;
     967             :   ForAllFreeListCategories([&sum](FreeListCategory* category) {
     968           0 :     sum += category->available();
     969             :   });
     970           0 :   return sum;
     971             : }
     972             : 
     973             : #ifdef DEBUG
     974             : namespace {
      975             : // Skips fillers, starting from the given filler, up to the end address.
      976             : // Returns the first address after the skipped fillers.
     977             : Address SkipFillers(HeapObject filler, Address end) {
     978             :   Address addr = filler->address();
     979             :   while (addr < end) {
     980             :     filler = HeapObject::FromAddress(addr);
     981             :     CHECK(filler->IsFiller());
     982             :     addr = filler->address() + filler->Size();
     983             :   }
     984             :   return addr;
     985             : }
     986             : }  // anonymous namespace
     987             : #endif  // DEBUG
     988             : 
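                      : // Returns to the OS the committed memory between the page's high water mark
                      : // (rounded up to a commit-page boundary) and area_end, and re-creates the
                      : // filler that covers the remaining, still-committed slack. Returns the
                      : // number of bytes released.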
     989      184299 : size_t Page::ShrinkToHighWaterMark() {
     990             :   // Shrinking only makes sense outside of the CodeRange, where we don't care
     991             :   // about address space fragmentation.
     992             :   VirtualMemory* reservation = reserved_memory();
     993      184299 :   if (!reservation->IsReserved()) return 0;
     994             : 
     995             :   // Shrink pages to high water mark. The water mark points either to a filler
     996             :   // or the area_end.
     997      368598 :   HeapObject filler = HeapObject::FromAddress(HighWaterMark());
     998      184299 :   if (filler->address() == area_end()) return 0;
     999      184294 :   CHECK(filler->IsFiller());
    1000             :   // Ensure that no objects were allocated in [filler, area_end) region.
    1001             :   DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
    1002             :   // Ensure that no objects will be allocated on this page.
    1003             :   DCHECK_EQ(0u, AvailableInFreeList());
    1004             : 
    1005      184294 :   size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
    1006             :                             MemoryAllocator::GetCommitPageSize());
    1007      184294 :   if (unused > 0) {
    1008             :     DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    1009      184274 :     if (FLAG_trace_gc_verbose) {
    1010           0 :       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
    1011             :                    reinterpret_cast<void*>(this),
    1012             :                    reinterpret_cast<void*>(area_end()),
    1013           0 :                    reinterpret_cast<void*>(area_end() - unused));
    1014             :     }
    1015             :     heap()->CreateFillerObjectAt(
    1016             :         filler->address(),
    1017             :         static_cast<int>(area_end() - filler->address() - unused),
    1018      368548 :         ClearRecordedSlots::kNo);
    1019      368548 :     heap()->memory_allocator()->PartialFreeMemory(
    1020      368548 :         this, address() + size() - unused, unused, area_end() - unused);
    1021      184274 :     if (filler->address() != area_end()) {
    1022      184274 :       CHECK(filler->IsFiller());
    1023      184274 :       CHECK_EQ(filler->address() + filler->Size(), area_end());
    1024             :     }
    1025             :   }
    1026             :   return unused;
    1027             : }
    1028             : 
    1029      127771 : void Page::CreateBlackArea(Address start, Address end) {
    1030             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1031             :   DCHECK_EQ(Page::FromAddress(start), this);
    1032             :   DCHECK_NE(start, end);
    1033             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
    1034             :   IncrementalMarking::MarkingState* marking_state =
    1035             :       heap()->incremental_marking()->marking_state();
    1036      127771 :   marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
    1037      127771 :                                         AddressToMarkbitIndex(end));
    1038      127771 :   marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
    1039      127771 : }
    1040             : 
    1041        5399 : void Page::DestroyBlackArea(Address start, Address end) {
    1042             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1043             :   DCHECK_EQ(Page::FromAddress(start), this);
    1044             :   DCHECK_NE(start, end);
    1045             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
    1046             :   IncrementalMarking::MarkingState* marking_state =
    1047             :       heap()->incremental_marking()->marking_state();
    1048        5399 :   marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
    1049        5399 :                                           AddressToMarkbitIndex(end));
    1050        5399 :   marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
    1051        5399 : }
    1052             : 
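                      : // Releases the tail of |chunk|'s reservation starting at |start_free| and
                      : // shrinks the chunk's bookkeeping accordingly; for executable chunks the
                      : // trailing guard page is re-established at the new area_end first.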
    1053      184362 : void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
    1054             :                                         size_t bytes_to_free,
    1055             :                                         Address new_area_end) {
    1056             :   VirtualMemory* reservation = chunk->reserved_memory();
    1057             :   DCHECK(reservation->IsReserved());
    1058      184362 :   chunk->size_ -= bytes_to_free;
    1059      184362 :   chunk->area_end_ = new_area_end;
    1060      184362 :   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    1061             :     // Add guard page at the end.
    1062       61423 :     size_t page_size = GetCommitPageSize();
    1063             :     DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
    1064             :     DCHECK_EQ(chunk->address() + chunk->size(),
    1065             :               chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
    1066       61423 :     reservation->SetPermissions(chunk->area_end_, page_size,
    1067       61423 :                                 PageAllocator::kNoAccess);
    1068             :   }
     1069             :   // On e.g. Windows, a reservation may be larger than a page; partially
     1070             :   // releasing it starting at |start_free| will also release the potentially
     1071             :   // unused part behind the current page.
    1072      184362 :   const size_t released_bytes = reservation->Release(start_free);
    1073             :   DCHECK_GE(size_, released_bytes);
    1074             :   size_ -= released_bytes;
    1075      184361 :   isolate_->counters()->memory_allocated()->Decrement(
    1076      184361 :       static_cast<int>(released_bytes));
    1077      184362 : }
    1078             : 
    1079      927777 : void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
    1080             :   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
    1081      927777 :   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
    1082             : 
    1083      927777 :   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
    1084      927777 :                                          chunk->IsEvacuationCandidate());
    1085             : 
    1086             :   VirtualMemory* reservation = chunk->reserved_memory();
    1087             :   const size_t size =
    1088      927779 :       reservation->IsReserved() ? reservation->size() : chunk->size();
    1089             :   DCHECK_GE(size_, static_cast<size_t>(size));
    1090             :   size_ -= size;
    1091     1855558 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
    1092      927786 :   if (chunk->executable() == EXECUTABLE) {
    1093             :     DCHECK_GE(size_executable_, size);
    1094             :     size_executable_ -= size;
    1095             :   }
    1096             : 
    1097             :   chunk->SetFlag(MemoryChunk::PRE_FREED);
    1098             : 
    1099      927786 :   if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
    1100      927785 : }
    1101             : 
    1102             : 
    1103      905251 : void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
    1104             :   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
    1105      905251 :   chunk->ReleaseAllocatedMemory();
    1106             : 
    1107             :   VirtualMemory* reservation = chunk->reserved_memory();
    1108      905250 :   if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    1109      408893 :     UncommitMemory(reservation);
    1110             :   } else {
    1111      496357 :     if (reservation->IsReserved()) {
    1112      434838 :       reservation->Free();
    1113             :     } else {
     1114             :       // Only read-only pages can have an uninitialized reservation object.
    1115             :       DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
    1116             :       FreeMemory(page_allocator(chunk->executable()), chunk->address(),
    1117       61519 :                  chunk->size());
    1118             :     }
    1119             :   }
    1120      905237 : }
    1121             : 
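                      : // Free modes, as handled below: kFull pre-frees and releases the chunk on
                      : // the calling thread; kAlreadyPooled only returns the (already uncommitted)
                      : // address range to the data page allocator; kPooledAndQueue marks the chunk
                      : // POOLED and, like kPreFreeAndQueue, pre-frees it and hands it to the
                      : // unmapper so a concurrent thread performs the actual freeing.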
    1122             : template <MemoryAllocator::FreeMode mode>
    1123     1300329 : void MemoryAllocator::Free(MemoryChunk* chunk) {
    1124             :   switch (mode) {
    1125             :     case kFull:
    1126      477178 :       PreFreeMemory(chunk);
    1127      477182 :       PerformFreeMemory(chunk);
    1128             :       break;
    1129             :     case kAlreadyPooled:
    1130             :       // Pooled pages cannot be touched anymore as their memory is uncommitted.
     1131             :       // Pooled pages are non-executable.
    1132      372553 :       FreeMemory(data_page_allocator(), chunk->address(),
    1133             :                  static_cast<size_t>(MemoryChunk::kPageSize));
    1134             :       break;
    1135             :     case kPooledAndQueue:
    1136             :       DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
    1137             :       DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
    1138             :       chunk->SetFlag(MemoryChunk::POOLED);
    1139             :       V8_FALLTHROUGH;
    1140             :     case kPreFreeAndQueue:
    1141      450598 :       PreFreeMemory(chunk);
    1142             :       // The chunks added to this queue will be freed by a concurrent thread.
    1143      450603 :       unmapper()->AddMemoryChunkSafe(chunk);
    1144             :       break;
    1145             :   }
    1146     1300335 : }
    1147             : 
    1148             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1149             :     MemoryAllocator::kFull>(MemoryChunk* chunk);
    1150             : 
    1151             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1152             :     MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
    1153             : 
    1154             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1155             :     MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
    1156             : 
    1157             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1158             :     MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
    1159             : 
    1160             : template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
    1161      856460 : Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
    1162             :                                     Executability executable) {
    1163             :   MemoryChunk* chunk = nullptr;
    1164             :   if (alloc_mode == kPooled) {
    1165             :     DCHECK_EQ(size, static_cast<size_t>(
    1166             :                         MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    1167             :                             owner->identity())));
    1168             :     DCHECK_EQ(executable, NOT_EXECUTABLE);
    1169      432380 :     chunk = AllocatePagePooled(owner);
    1170             :   }
    1171      432380 :   if (chunk == nullptr) {
    1172      797584 :     chunk = AllocateChunk(size, size, executable, owner);
    1173             :   }
    1174      856463 :   if (chunk == nullptr) return nullptr;
    1175      856463 :   return owner->InitializePage(chunk, executable);
    1176             : }
    1177             : 
    1178             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1179             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    1180             :         size_t size, PagedSpace* owner, Executability executable);
    1181             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1182             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    1183             :         size_t size, SemiSpace* owner, Executability executable);
    1184             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1185             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    1186             :         size_t size, SemiSpace* owner, Executability executable);
    1187             : 
    1188       70445 : LargePage* MemoryAllocator::AllocateLargePage(size_t size,
    1189             :                                               LargeObjectSpace* owner,
    1190             :                                               Executability executable) {
    1191       70445 :   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
    1192       70444 :   if (chunk == nullptr) return nullptr;
    1193      140888 :   return LargePage::Initialize(isolate_->heap(), chunk, executable);
    1194             : }
    1195             : 
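                      : // Tries to reuse a pooled chunk from the unmapper instead of reserving new
                      : // address space: a pooled chunk's memory is only uncommitted, so it just
                      : // needs to be re-committed and re-initialized as a regular data page.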
    1196             : template <typename SpaceType>
    1197      432380 : MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
    1198      432380 :   MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
    1199      432380 :   if (chunk == nullptr) return nullptr;
    1200             :   const int size = MemoryChunk::kPageSize;
    1201       58876 :   const Address start = reinterpret_cast<Address>(chunk);
    1202             :   const Address area_start =
    1203             :       start +
    1204       58876 :       MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
    1205       58876 :   const Address area_end = start + size;
    1206             :   // Pooled pages are always regular data pages.
    1207             :   DCHECK_NE(CODE_SPACE, owner->identity());
    1208       58876 :   VirtualMemory reservation(data_page_allocator(), start, size);
    1209       58876 :   if (!CommitMemory(&reservation)) return nullptr;
    1210             :   if (Heap::ShouldZapGarbage()) {
    1211             :     ZapBlock(start, size, kZapValue);
    1212             :   }
    1213      117752 :   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
    1214             :                           NOT_EXECUTABLE, owner, std::move(reservation));
    1215             :   size_ += size;
    1216       58876 :   return chunk;
    1217             : }
    1218             : 
    1219           0 : void MemoryAllocator::ZapBlock(Address start, size_t size,
    1220             :                                uintptr_t zap_value) {
    1221             :   DCHECK(IsAligned(start, kTaggedSize));
    1222             :   DCHECK(IsAligned(size, kTaggedSize));
    1223           0 :   MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
    1224             :                size >> kTaggedSizeLog2);
    1225           0 : }
    1226             : 
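                      : // The commit page size can be overridden with FLAG_v8_os_page_size, which is
                      : // interpreted in KB (e.g. a value of 16 yields 16384 bytes here); otherwise
                      : // the OS commit page size is used.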
    1227           5 : intptr_t MemoryAllocator::GetCommitPageSize() {
    1228    29777812 :   if (FLAG_v8_os_page_size != 0) {
    1229             :     DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
    1230        2081 :     return FLAG_v8_os_page_size * KB;
    1231             :   } else {
    1232    29775731 :     return CommitPageSize();
    1233             :   }
    1234             : }
    1235             : 
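                      : // Computes the largest commit-page-aligned sub-range of a free block that
                      : // can be discarded, keeping at least FreeSpace::kSize bytes at |addr| for the
                      : // free-space filler itself. Illustrative arithmetic, assuming a 4 KB commit
                      : // page and FreeSpace::kSize == 16: addr = 0x1008, size = 0x3000 gives
                      : // discardable_start = 0x2000 and discardable_end = 0x4000.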
    1236     6595277 : base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
    1237             :                                                               size_t size) {
    1238     6592963 :   size_t page_size = MemoryAllocator::GetCommitPageSize();
    1239     6592963 :   if (size < page_size + FreeSpace::kSize) {
    1240     6528477 :     return base::AddressRegion(0, 0);
    1241             :   }
    1242       64486 :   Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
    1243       64486 :   Address discardable_end = RoundDown(addr + size, page_size);
    1244       64486 :   if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
    1245             :   return base::AddressRegion(discardable_start,
    1246       59427 :                              discardable_end - discardable_start);
    1247             : }
    1248             : 
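                      : // Commits an executable chunk in four steps that mirror the code-page
                      : // layout: read-write header, no-access pre-code guard page, read-write code
                      : // body, and no-access post-code guard page. If any step fails, the
                      : // permissions granted so far are revoked and false is returned.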
    1249      129753 : bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
    1250             :                                              size_t commit_size,
    1251             :                                              size_t reserved_size) {
    1252      129754 :   const size_t page_size = GetCommitPageSize();
    1253             :   // All addresses and sizes must be aligned to the commit page size.
    1254             :   DCHECK(IsAligned(start, page_size));
    1255             :   DCHECK_EQ(0, commit_size % page_size);
    1256             :   DCHECK_EQ(0, reserved_size % page_size);
    1257             :   const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
    1258             :   const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
    1259             :   const size_t code_area_offset =
    1260      129754 :       MemoryChunkLayout::ObjectStartOffsetInCodePage();
    1261             :   // reserved_size includes two guard regions, commit_size does not.
    1262             :   DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
    1263      129754 :   const Address pre_guard_page = start + pre_guard_offset;
    1264      129754 :   const Address code_area = start + code_area_offset;
    1265      129754 :   const Address post_guard_page = start + reserved_size - guard_size;
    1266             :   // Commit the non-executable header, from start to pre-code guard page.
    1267      129754 :   if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    1268             :     // Create the pre-code guard page, following the header.
    1269      129754 :     if (vm->SetPermissions(pre_guard_page, page_size,
    1270             :                            PageAllocator::kNoAccess)) {
    1271             :       // Commit the executable code body.
    1272      129752 :       if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
    1273             :                              PageAllocator::kReadWrite)) {
    1274             :         // Create the post-code guard page.
    1275      129754 :         if (vm->SetPermissions(post_guard_page, page_size,
    1276             :                                PageAllocator::kNoAccess)) {
    1277      129753 :           UpdateAllocatedSpaceLimits(start, code_area + commit_size);
    1278      129754 :           return true;
    1279             :         }
    1280           0 :         vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
    1281             :       }
    1282             :     }
    1283           0 :     vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
    1284             :   }
    1285             :   return false;
    1286             : }
    1287             : 
    1288             : 
    1289             : // -----------------------------------------------------------------------------
    1290             : // MemoryChunk implementation
    1291             : 
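                      : // Releases all side data owned by the chunk (skip list, mutexes, slot sets,
                      : // typed slot sets, invalidated slots, local array-buffer tracker, and
                      : // bitmaps) and, for regular pages, their free-list categories.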
    1292      927781 : void MemoryChunk::ReleaseAllocatedMemory() {
    1293      927781 :   if (skip_list_ != nullptr) {
    1294       89390 :     delete skip_list_;
    1295       89390 :     skip_list_ = nullptr;
    1296             :   }
    1297      927781 :   if (mutex_ != nullptr) {
    1298      866262 :     delete mutex_;
    1299      866264 :     mutex_ = nullptr;
    1300             :   }
    1301      927783 :   if (page_protection_change_mutex_ != nullptr) {
    1302      927782 :     delete page_protection_change_mutex_;
    1303      927782 :     page_protection_change_mutex_ = nullptr;
    1304             :   }
    1305      927783 :   ReleaseSlotSet<OLD_TO_NEW>();
    1306      927775 :   ReleaseSlotSet<OLD_TO_OLD>();
    1307      927766 :   ReleaseTypedSlotSet<OLD_TO_NEW>();
    1308      927764 :   ReleaseTypedSlotSet<OLD_TO_OLD>();
    1309      927762 :   ReleaseInvalidatedSlots();
    1310      927760 :   if (local_tracker_ != nullptr) ReleaseLocalTracker();
    1311      927782 :   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
    1312      927782 :   if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
    1313             : 
    1314      927782 :   if (!IsLargePage()) {
    1315             :     Page* page = static_cast<Page*>(this);
    1316             :     page->ReleaseFreeListCategories();
    1317             :   }
    1318      927781 : }
    1319             : 
    1320       88541 : static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
    1321       88541 :   size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
    1322             :   DCHECK_LT(0, pages);
    1323       88541 :   SlotSet* slot_set = new SlotSet[pages];
    1324      268862 :   for (size_t i = 0; i < pages; i++) {
    1325       90159 :     slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
    1326             :   }
    1327       88544 :   return slot_set;
    1328             : }
    1329             : 
    1330             : template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
    1331             : template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
    1332             : 
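                      : // Slot sets are installed with a release compare-and-swap so that concurrent
                      : // callers race benignly: the loser deletes its freshly allocated set and
                      : // returns the winner's.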
    1333             : template <RememberedSetType type>
    1334       88541 : SlotSet* MemoryChunk::AllocateSlotSet() {
    1335       88541 :   SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
    1336       88543 :   SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
    1337             :       &slot_set_[type], nullptr, slot_set);
    1338       88543 :   if (old_slot_set != nullptr) {
    1339          51 :     delete[] slot_set;
    1340             :     slot_set = old_slot_set;
    1341             :   }
    1342             :   DCHECK(slot_set);
    1343       88543 :   return slot_set;
    1344             : }
    1345             : 
    1346             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
    1347             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
    1348             : 
    1349             : template <RememberedSetType type>
    1350     1867550 : void MemoryChunk::ReleaseSlotSet() {
    1351     1867550 :   SlotSet* slot_set = slot_set_[type];
    1352     1867550 :   if (slot_set) {
    1353       88488 :     slot_set_[type] = nullptr;
    1354       88488 :     delete[] slot_set;
    1355             :   }
    1356     1867555 : }
    1357             : 
    1358             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
    1359             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
    1360             : 
    1361             : template <RememberedSetType type>
    1362        9516 : TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
    1363        9516 :   TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
    1364        9516 :   TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
    1365             :       &typed_slot_set_[type], nullptr, typed_slot_set);
    1366        9516 :   if (old_value != nullptr) {
    1367           0 :     delete typed_slot_set;
    1368             :     typed_slot_set = old_value;
    1369             :   }
    1370             :   DCHECK(typed_slot_set);
    1371        9516 :   return typed_slot_set;
    1372             : }
    1373             : 
    1374             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
    1375             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
    1376             : 
    1377             : template <RememberedSetType type>
    1378     1858550 : void MemoryChunk::ReleaseTypedSlotSet() {
    1379     1858550 :   TypedSlotSet* typed_slot_set = typed_slot_set_[type];
    1380     1858550 :   if (typed_slot_set) {
    1381        9516 :     typed_slot_set_[type] = nullptr;
    1382        9516 :     delete typed_slot_set;
    1383             :   }
    1384     1858550 : }
    1385             : 
    1386         138 : InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
    1387             :   DCHECK_NULL(invalidated_slots_);
    1388         276 :   invalidated_slots_ = new InvalidatedSlots();
    1389         138 :   return invalidated_slots_;
    1390             : }
    1391             : 
    1392      928481 : void MemoryChunk::ReleaseInvalidatedSlots() {
    1393      928481 :   if (invalidated_slots_) {
    1394         275 :     delete invalidated_slots_;
    1395         138 :     invalidated_slots_ = nullptr;
    1396             :   }
    1397      928482 : }
    1398             : 
    1399       26037 : void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
    1400             :                                                      int size) {
    1401       26037 :   if (!ShouldSkipEvacuationSlotRecording()) {
    1402       20670 :     if (invalidated_slots() == nullptr) {
    1403         138 :       AllocateInvalidatedSlots();
    1404             :     }
    1405       20670 :     int old_size = (*invalidated_slots())[object];
    1406       41340 :     (*invalidated_slots())[object] = std::max(old_size, size);
    1407             :   }
    1408       26037 : }
    1409             : 
    1410           0 : bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
    1411           0 :   if (ShouldSkipEvacuationSlotRecording()) {
    1412             :     // Invalidated slots do not matter if we are not recording slots.
    1413             :     return true;
    1414             :   }
    1415           0 :   if (invalidated_slots() == nullptr) {
    1416             :     return false;
    1417             :   }
    1418             :   return invalidated_slots()->find(object) != invalidated_slots()->end();
    1419             : }
    1420             : 
    1421           5 : void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
    1422             :                                                  HeapObject new_start) {
    1423             :   DCHECK_LT(old_start, new_start);
    1424             :   DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
    1425             :             MemoryChunk::FromHeapObject(new_start));
    1426           5 :   if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
    1427             :     auto it = invalidated_slots()->find(old_start);
    1428           0 :     if (it != invalidated_slots()->end()) {
    1429           0 :       int old_size = it->second;
    1430           0 :       int delta = static_cast<int>(new_start->address() - old_start->address());
    1431             :       invalidated_slots()->erase(it);
    1432           0 :       (*invalidated_slots())[new_start] = old_size - delta;
    1433             :     }
    1434             :   }
    1435           5 : }
    1436             : 
    1437      445804 : void MemoryChunk::ReleaseLocalTracker() {
    1438             :   DCHECK_NOT_NULL(local_tracker_);
    1439      445804 :   delete local_tracker_;
    1440      445822 :   local_tracker_ = nullptr;
    1441      445822 : }
    1442             : 
    1443           0 : void MemoryChunk::AllocateYoungGenerationBitmap() {
    1444             :   DCHECK_NULL(young_generation_bitmap_);
    1445           0 :   young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1446           0 : }
    1447             : 
    1448           0 : void MemoryChunk::ReleaseYoungGenerationBitmap() {
    1449             :   DCHECK_NOT_NULL(young_generation_bitmap_);
    1450           0 :   free(young_generation_bitmap_);
    1451           0 :   young_generation_bitmap_ = nullptr;
    1452           0 : }
    1453             : 
    1454           0 : void MemoryChunk::AllocateMarkingBitmap() {
    1455             :   DCHECK_NULL(marking_bitmap_);
    1456      927906 :   marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1457           0 : }
    1458             : 
    1459           0 : void MemoryChunk::ReleaseMarkingBitmap() {
    1460             :   DCHECK_NOT_NULL(marking_bitmap_);
    1461      927784 :   free(marking_bitmap_);
    1462      927784 :   marking_bitmap_ = nullptr;
    1463           0 : }
    1464             : 
    1465             : // -----------------------------------------------------------------------------
    1466             : // PagedSpace implementation
    1467             : 
    1468           0 : void Space::CheckOffsetsAreConsistent() const {
    1469             :   static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
    1470             :                 "ID offset inconsistent");
    1471             :   DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
    1472           0 : }
    1473             : 
    1474       86716 : void Space::AddAllocationObserver(AllocationObserver* observer) {
    1475      292799 :   allocation_observers_.push_back(observer);
    1476      292799 :   StartNextInlineAllocationStep();
    1477       86716 : }
    1478             : 
    1479      260511 : void Space::RemoveAllocationObserver(AllocationObserver* observer) {
    1480             :   auto it = std::find(allocation_observers_.begin(),
    1481      260511 :                       allocation_observers_.end(), observer);
    1482             :   DCHECK(allocation_observers_.end() != it);
    1483      260512 :   allocation_observers_.erase(it);
    1484      260512 :   StartNextInlineAllocationStep();
    1485      260511 : }
    1486             : 
    1487      775744 : void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
    1488             : 
    1489      290904 : void Space::ResumeAllocationObservers() {
    1490      775744 :   allocation_observers_paused_ = false;
    1491      290904 : }
    1492             : 
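                      : // Notifies all allocation observers that |size| bytes are about to be
                      : // allocated at |soon_object|. A filler is written there first, presumably so
                      : // the heap stays iterable while the observers run.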
    1493   125419568 : void Space::AllocationStep(int bytes_since_last, Address soon_object,
    1494             :                            int size) {
    1495   125419568 :   if (!AllocationObserversActive()) {
    1496             :     return;
    1497             :   }
    1498             : 
    1499             :   DCHECK(!heap()->allocation_step_in_progress());
    1500             :   heap()->set_allocation_step_in_progress(true);
    1501    22352770 :   heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
    1502    44868896 :   for (AllocationObserver* observer : allocation_observers_) {
    1503    22516160 :     observer->AllocationStep(bytes_since_last, soon_object, size);
    1504             :   }
    1505             :   heap()->set_allocation_step_in_progress(false);
    1506             : }
    1507             : 
    1508           0 : intptr_t Space::GetNextInlineAllocationStepSize() {
    1509             :   intptr_t next_step = 0;
    1510    44558828 :   for (AllocationObserver* observer : allocation_observers_) {
    1511             :     next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
    1512    22350386 :                           : observer->bytes_to_next_step();
    1513             :   }
    1514             :   DCHECK(allocation_observers_.size() == 0 || next_step > 0);
    1515           0 :   return next_step;
    1516             : }
    1517             : 
    1518      468242 : PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
    1519             :                        Executability executable)
    1520     1404728 :     : SpaceWithLinearArea(heap, space), executable_(executable) {
    1521      468242 :   area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
    1522             :   accounting_stats_.Clear();
    1523      468242 : }
    1524             : 
    1525      468179 : void PagedSpace::TearDown() {
    1526     1298679 :   while (!memory_chunk_list_.Empty()) {
    1527             :     MemoryChunk* chunk = memory_chunk_list_.front();
    1528             :     memory_chunk_list_.Remove(chunk);
    1529      415248 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
    1530             :   }
    1531             :   accounting_stats_.Clear();
    1532      468181 : }
    1533             : 
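                      : // Moves pages that the concurrent sweeper has finished back onto this
                      : // space's free lists. A compaction space (is_local()) additionally takes
                      : // ownership of swept pages from their owning space and stops once roughly
                      : // kCompactionMemoryWanted bytes have been gathered.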
    1534      372292 : void PagedSpace::RefillFreeList() {
     1535             :   // Any PagedSpace might invoke RefillFreeList; we filter out all spaces
     1536             :   // other than the old-generation spaces.
    1537      544467 :   if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
    1538      372292 :       identity() != MAP_SPACE && identity() != RO_SPACE) {
    1539             :     return;
    1540             :   }
    1541             :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    1542             :   size_t added = 0;
    1543             :   {
    1544             :     Page* p = nullptr;
    1545      841898 :     while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
     1546             :       // Only during compaction can pages actually change ownership. This is
     1547             :       // safe because there is no other competing action on the page links
     1548             :       // during compaction.
    1549      469644 :       if (is_local()) {
    1550             :         DCHECK_NE(this, p->owner());
    1551             :         PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
    1552             :         base::MutexGuard guard(owner->mutex());
    1553       40558 :         owner->RefineAllocatedBytesAfterSweeping(p);
    1554       40558 :         owner->RemovePage(p);
    1555       40558 :         added += AddPage(p);
    1556             :       } else {
    1557             :         base::MutexGuard guard(mutex());
    1558             :         DCHECK_EQ(this, p->owner());
    1559      429086 :         RefineAllocatedBytesAfterSweeping(p);
    1560      429086 :         added += RelinkFreeListCategories(p);
    1561             :       }
    1562      469644 :       added += p->wasted_memory();
    1563      469644 :       if (is_local() && (added > kCompactionMemoryWanted)) break;
    1564             :     }
    1565             :   }
    1566             : }
    1567             : 
    1568      222097 : void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
    1569             :   base::MutexGuard guard(mutex());
    1570             : 
    1571             :   DCHECK(identity() == other->identity());
    1572             :   // Unmerged fields:
    1573             :   //   area_size_
    1574      222097 :   other->FreeLinearAllocationArea();
    1575             : 
    1576             :   // The linear allocation area of {other} should be destroyed now.
    1577             :   DCHECK_EQ(kNullAddress, other->top());
    1578             :   DCHECK_EQ(kNullAddress, other->limit());
    1579             : 
    1580             :   // Move over pages.
    1581      367971 :   for (auto it = other->begin(); it != other->end();) {
    1582             :     Page* p = *(it++);
    1583             :     // Relinking requires the category to be unlinked.
    1584       72937 :     other->RemovePage(p);
    1585       72937 :     AddPage(p);
    1586             :     DCHECK_EQ(p->AvailableInFreeList(),
    1587             :               p->AvailableInFreeListFromAllocatedBytes());
    1588             :   }
    1589             :   DCHECK_EQ(0u, other->Size());
    1590             :   DCHECK_EQ(0u, other->Capacity());
    1591      222097 : }
    1592             : 
    1593             : 
    1594        1004 : size_t PagedSpace::CommittedPhysicalMemory() {
    1595        1004 :   if (!base::OS::HasLazyCommits()) return CommittedMemory();
    1596        1004 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1597             :   size_t size = 0;
    1598        2734 :   for (Page* page : *this) {
    1599        1730 :     size += page->CommittedPhysicalMemory();
    1600             :   }
    1601             :   return size;
    1602             : }
    1603             : 
    1604          20 : bool PagedSpace::ContainsSlow(Address addr) {
    1605             :   Page* p = Page::FromAddress(addr);
    1606         505 :   for (Page* page : *this) {
    1607         500 :     if (page == p) return true;
    1608             :   }
    1609             :   return false;
    1610             : }
    1611             : 
    1612      469644 : void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
    1613      469644 :   CHECK(page->SweepingDone());
    1614             :   auto marking_state =
    1615             :       heap()->incremental_marking()->non_atomic_marking_state();
     1616             :   // The live byte count on the page was accounted for in the space's
     1617             :   // allocated bytes counter. After sweeping, allocated_bytes() contains the
     1618             :   // accurate live byte count on the page.
    1619      469644 :   size_t old_counter = marking_state->live_bytes(page);
    1620             :   size_t new_counter = page->allocated_bytes();
    1621             :   DCHECK_GE(old_counter, new_counter);
    1622      469644 :   if (old_counter > new_counter) {
    1623       11940 :     DecreaseAllocatedBytes(old_counter - new_counter, page);
    1624             :     // Give the heap a chance to adjust counters in response to the
    1625             :     // more precise and smaller old generation size.
    1626             :     heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
    1627             :   }
    1628             :   marking_state->SetLiveBytes(page, 0);
    1629      469644 : }
    1630             : 
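                      : // Detaches and returns a page whose free-list entries can still serve an
                      : // allocation of |size_in_bytes|, trying the largest free-list categories
                      : // first and skipping categories smaller than the request requires; returns
                      : // nullptr if no such page exists.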
    1631       37195 : Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
    1632             :   base::MutexGuard guard(mutex());
    1633             :   // Check for pages that still contain free list entries. Bail out for smaller
    1634             :   // categories.
    1635             :   const int minimum_category =
    1636       74420 :       static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
    1637             :   Page* page = free_list()->GetPageForCategoryType(kHuge);
    1638       37210 :   if (!page && static_cast<int>(kLarge) >= minimum_category)
    1639             :     page = free_list()->GetPageForCategoryType(kLarge);
    1640       37210 :   if (!page && static_cast<int>(kMedium) >= minimum_category)
    1641             :     page = free_list()->GetPageForCategoryType(kMedium);
    1642       37210 :   if (!page && static_cast<int>(kSmall) >= minimum_category)
    1643             :     page = free_list()->GetPageForCategoryType(kSmall);
    1644       37210 :   if (!page && static_cast<int>(kTiny) >= minimum_category)
    1645             :     page = free_list()->GetPageForCategoryType(kTiny);
    1646       37210 :   if (!page && static_cast<int>(kTiniest) >= minimum_category)
    1647             :     page = free_list()->GetPageForCategoryType(kTiniest);
    1648       37210 :   if (!page) return nullptr;
    1649       14686 :   RemovePage(page);
    1650       14686 :   return page;
    1651             : }
    1652             : 
    1653      553188 : size_t PagedSpace::AddPage(Page* page) {
    1654      553188 :   CHECK(page->SweepingDone());
    1655      553188 :   page->set_owner(this);
    1656             :   memory_chunk_list_.PushBack(page);
    1657             :   AccountCommitted(page->size());
    1658             :   IncreaseCapacity(page->area_size());
    1659             :   IncreaseAllocatedBytes(page->allocated_bytes(), page);
    1660     2765944 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    1661     1106377 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    1662     1106377 :     IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    1663             :   }
    1664      553189 :   return RelinkFreeListCategories(page);
    1665             : }
    1666             : 
    1667      128181 : void PagedSpace::RemovePage(Page* page) {
    1668      128181 :   CHECK(page->SweepingDone());
    1669             :   memory_chunk_list_.Remove(page);
    1670             :   UnlinkFreeListCategories(page);
    1671             :   DecreaseAllocatedBytes(page->allocated_bytes(), page);
    1672             :   DecreaseCapacity(page->area_size());
    1673             :   AccountUncommitted(page->size());
    1674      640905 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    1675      256362 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    1676      256362 :     DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    1677             :   }
    1678      128181 : }
    1679             : 
    1680      184299 : size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
    1681      184299 :   size_t unused = page->ShrinkToHighWaterMark();
    1682             :   accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    1683             :   AccountUncommitted(unused);
    1684      184298 :   return unused;
    1685             : }
    1686             : 
    1687         390 : void PagedSpace::ResetFreeList() {
    1688      371164 :   for (Page* page : *this) {
    1689      186505 :     free_list_.EvictFreeListItems(page);
    1690             :   }
    1691             :   DCHECK(free_list_.IsEmpty());
    1692         390 : }
    1693             : 
    1694      184269 : void PagedSpace::ShrinkImmortalImmovablePages() {
    1695             :   DCHECK(!heap()->deserialization_complete());
    1696      184269 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1697      184269 :   FreeLinearAllocationArea();
    1698             :   ResetFreeList();
    1699      368548 :   for (Page* page : *this) {
    1700             :     DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    1701      184279 :     ShrinkPageToHighWaterMark(page);
    1702             :   }
    1703      184269 : }
    1704             : 
    1705      435192 : bool PagedSpace::Expand() {
    1706             :   // Always lock against the main space as we can only adjust capacity and
    1707             :   // pages concurrently for the main paged space.
    1708      435192 :   base::MutexGuard guard(heap()->paged_space(identity())->mutex());
    1709             : 
    1710             :   const int size = AreaSize();
    1711             : 
    1712      435197 :   if (!heap()->CanExpandOldGeneration(size)) return false;
    1713             : 
    1714             :   Page* page =
    1715      424071 :       heap()->memory_allocator()->AllocatePage(size, this, executable());
    1716      424071 :   if (page == nullptr) return false;
    1717             :   // Pages created during bootstrapping may contain immortal immovable objects.
    1718      424071 :   if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
    1719      424071 :   AddPage(page);
    1720             :   Free(page->area_start(), page->area_size(),
    1721      424071 :        SpaceAccountingMode::kSpaceAccounted);
    1722      424069 :   heap()->NotifyOldGenerationExpansion();
    1723      424069 :   return true;
    1724             : }
    1725             : 
    1726             : 
    1727      155561 : int PagedSpace::CountTotalPages() {
    1728             :   int count = 0;
    1729      472020 :   for (Page* page : *this) {
    1730      316459 :     count++;
    1731             :     USE(page);
    1732             :   }
    1733      155561 :   return count;
    1734             : }
    1735             : 
    1736             : 
    1737      221865 : void PagedSpace::ResetFreeListStatistics() {
    1738      700456 :   for (Page* page : *this) {
    1739             :     page->ResetFreeListStatistics();
    1740             :   }
    1741      221865 : }
    1742             : 
    1743     1273098 : void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
    1744             :   SetTopAndLimit(top, limit);
    1745     2546198 :   if (top != kNullAddress && top != limit &&
    1746             :       heap()->incremental_marking()->black_allocation()) {
    1747      106091 :     Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
    1748             :   }
    1749     1273098 : }
    1750             : 
    1751    22123660 : void PagedSpace::DecreaseLimit(Address new_limit) {
    1752             :   Address old_limit = limit();
    1753             :   DCHECK_LE(top(), new_limit);
    1754             :   DCHECK_GE(old_limit, new_limit);
    1755    22123660 :   if (new_limit != old_limit) {
    1756             :     SetTopAndLimit(top(), new_limit);
    1757       22717 :     Free(new_limit, old_limit - new_limit,
    1758       22717 :          SpaceAccountingMode::kSpaceAccounted);
    1759       22717 :     if (heap()->incremental_marking()->black_allocation()) {
    1760             :       Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
    1761        4436 :                                                                    old_limit);
    1762             :     }
    1763             :   }
    1764    22123660 : }
    1765             : 
    1766    24049212 : Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
    1767             :                                           size_t min_size) {
    1768             :   DCHECK_GE(end - start, min_size);
    1769             : 
    1770    24049212 :   if (heap()->inline_allocation_disabled()) {
    1771             :     // Fit the requested area exactly.
    1772      320392 :     return start + min_size;
    1773    46499416 :   } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
    1774             :     // Generated code may allocate inline from the linear allocation area for.
    1775             :     // To make sure we can observe these allocations, we use a lower limit.
    1776    22208442 :     size_t step = GetNextInlineAllocationStepSize();
    1777             : 
    1778             :     // TODO(ofrobots): there is subtle difference between old space and new
    1779             :     // space here. Any way to avoid it? `step - 1` makes more sense as we would
    1780             :     // like to sample the object that straddles the `start + step` boundary.
    1781             :     // Rounding down further would introduce a small statistical error in
    1782             :     // sampling. However, presently PagedSpace requires limit to be aligned.
    1783             :     size_t rounded_step;
    1784    22208442 :     if (identity() == NEW_SPACE) {
    1785             :       DCHECK_GE(step, 1);
    1786      411118 :       rounded_step = step - 1;
    1787             :     } else {
    1788    21797324 :       rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
    1789             :     }
    1790    44416860 :     return Min(static_cast<Address>(start + min_size + rounded_step), end);
    1791             :   } else {
    1792             :     // The entire node can be used as the linear allocation area.
    1793             :     return end;
    1794             :   }
    1795             : }
    1796             : 
    1797       77568 : void PagedSpace::MarkLinearAllocationAreaBlack() {
    1798             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1799             :   Address current_top = top();
    1800             :   Address current_limit = limit();
    1801       77568 :   if (current_top != kNullAddress && current_top != current_limit) {
    1802             :     Page::FromAllocationAreaAddress(current_top)
    1803       21413 :         ->CreateBlackArea(current_top, current_limit);
    1804             :   }
    1805       77568 : }
    1806             : 
    1807        2007 : void PagedSpace::UnmarkLinearAllocationArea() {
    1808             :   Address current_top = top();
    1809             :   Address current_limit = limit();
    1810        2007 :   if (current_top != kNullAddress && current_top != current_limit) {
    1811             :     Page::FromAllocationAreaAddress(current_top)
    1812         963 :         ->DestroyBlackArea(current_top, current_limit);
    1813             :   }
    1814        2007 : }
    1815             : 
    1816     2582387 : void PagedSpace::FreeLinearAllocationArea() {
    1817             :   // Mark the old linear allocation area with a free space map so it can be
    1818             :   // skipped when scanning the heap.
    1819             :   Address current_top = top();
    1820             :   Address current_limit = limit();
    1821     2582387 :   if (current_top == kNullAddress) {
    1822             :     DCHECK_EQ(kNullAddress, current_limit);
    1823             :     return;
    1824             :   }
    1825             : 
    1826     1156526 :   if (heap()->incremental_marking()->black_allocation()) {
    1827             :     Page* page = Page::FromAllocationAreaAddress(current_top);
    1828             : 
    1829             :     // Clear the bits in the unused black area.
    1830      118663 :     if (current_top != current_limit) {
    1831             :       IncrementalMarking::MarkingState* marking_state =
    1832             :           heap()->incremental_marking()->marking_state();
    1833       91062 :       marking_state->bitmap(page)->ClearRange(
    1834             :           page->AddressToMarkbitIndex(current_top),
    1835       91062 :           page->AddressToMarkbitIndex(current_limit));
    1836       91062 :       marking_state->IncrementLiveBytes(
    1837             :           page, -static_cast<int>(current_limit - current_top));
    1838             :     }
    1839             :   }
    1840             : 
    1841     1156526 :   InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
    1842             :   SetTopAndLimit(kNullAddress, kNullAddress);
    1843             :   DCHECK_GE(current_limit, current_top);
    1844             : 
    1845             :   // The code page of the linear allocation area needs to be unprotected
    1846             :   // because we are going to write a filler into that memory area below.
    1847     1156522 :   if (identity() == CODE_SPACE) {
    1848             :     heap()->UnprotectAndRegisterMemoryChunk(
    1849       91550 :         MemoryChunk::FromAddress(current_top));
    1850             :   }
    1851     1156521 :   Free(current_top, current_limit - current_top,
    1852     1156521 :        SpaceAccountingMode::kSpaceAccounted);
    1853             : }
    1854             : 
    1855        9707 : void PagedSpace::ReleasePage(Page* page) {
    1856             :   DCHECK_EQ(
    1857             :       0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
    1858             :              page));
    1859             :   DCHECK_EQ(page->owner(), this);
    1860             : 
    1861        9707 :   free_list_.EvictFreeListItems(page);
    1862             :   DCHECK(!free_list_.ContainsPageFreeListItems(page));
    1863             : 
    1864        9707 :   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    1865             :     DCHECK(!top_on_previous_step_);
    1866             :     allocation_info_.Reset(kNullAddress, kNullAddress);
    1867             :   }
    1868             : 
    1869             :   AccountUncommitted(page->size());
    1870             :   accounting_stats_.DecreaseCapacity(page->area_size());
    1871        9707 :   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    1872        9707 : }
    1873             : 
    1874       14985 : void PagedSpace::SetReadable() {
    1875             :   DCHECK(identity() == CODE_SPACE);
    1876       30183 :   for (Page* page : *this) {
    1877       15198 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1878       15198 :     page->SetReadable();
    1879             :   }
    1880       14985 : }
    1881             : 
    1882      270813 : void PagedSpace::SetReadAndExecutable() {
    1883             :   DCHECK(identity() == CODE_SPACE);
    1884      697111 :   for (Page* page : *this) {
    1885      426298 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1886      426298 :     page->SetReadAndExecutable();
    1887             :   }
    1888      270814 : }
    1889             : 
    1890      285798 : void PagedSpace::SetReadAndWritable() {
    1891             :   DCHECK(identity() == CODE_SPACE);
    1892      665430 :   for (Page* page : *this) {
    1893      379632 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1894      379632 :     page->SetReadAndWritable();
    1895             :   }
    1896      285799 : }
    1897             : 
    1898       31280 : std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
    1899       31280 :   return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
    1900             : }
    1901             : 
    1902     1853629 : bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
    1903             :   DCHECK(IsAligned(size_in_bytes, kTaggedSize));
    1904             :   DCHECK_LE(top(), limit());
    1905             : #ifdef DEBUG
    1906             :   if (top() != limit()) {
    1907             :     DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
    1908             :   }
    1909             : #endif
    1910             :   // Don't free list allocate if there is linear space available.
    1911             :   DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
    1912             : 
    1913             :   // Mark the old linear allocation area with a free space map so it can be
    1914             :   // skipped when scanning the heap.  This also puts it back in the free list
    1915             :   // if it is big enough.
    1916     1853629 :   FreeLinearAllocationArea();
    1917             : 
    1918     1853613 :   if (!is_local()) {
    1919             :     heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    1920             :         heap()->GCFlagsForIncrementalMarking(),
    1921     1561268 :         kGCCallbackScheduleIdleGarbageCollection);
    1922             :   }
    1923             : 
    1924     1853615 :   size_t new_node_size = 0;
    1925     1853615 :   FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
    1926     1853613 :   if (new_node.is_null()) return false;
    1927             : 
    1928             :   DCHECK_GE(new_node_size, size_in_bytes);
    1929             : 
    1930             :   // The old-space-step might have finished sweeping and restarted marking.
    1931             :   // Verify that it did not turn the page of the new node into an evacuation
    1932             :   // candidate.
    1933             :   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
    1934             : 
    1935             :   // Memory in the linear allocation area is counted as allocated.  We may free
    1936             :   // a little of this again immediately - see below.
    1937             :   Page* page = Page::FromHeapObject(new_node);
    1938     1273106 :   IncreaseAllocatedBytes(new_node_size, page);
    1939             : 
    1940             :   Address start = new_node->address();
    1941     1273106 :   Address end = new_node->address() + new_node_size;
    1942     1273106 :   Address limit = ComputeLimit(start, end, size_in_bytes);
    1943             :   DCHECK_LE(limit, end);
    1944             :   DCHECK_LE(size_in_bytes, limit - start);
    1945     1273102 :   if (limit != end) {
    1946      211046 :     if (identity() == CODE_SPACE) {
    1947        2161 :       heap()->UnprotectAndRegisterMemoryChunk(page);
    1948             :     }
    1949      211046 :     Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
    1950             :   }
    1951     1273097 :   SetLinearAllocationArea(start, limit);
    1952             : 
    1953     1273101 :   return true;
    1954             : }
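// [Editor's note] A minimal standalone sketch of the split performed above:
// the refill takes a whole free-list node, keeps [start, limit) as the new
// linear allocation area, and returns the unused tail [limit, end) to the
// free list. Plain integer types stand in for V8's Address and accounting
// machinery; LabSplit and SplitFreeListNode are illustrative names, not V8 API.

#include <cassert>
#include <cstddef>
#include <cstdint>

struct LabSplit {
  uintptr_t top;                  // new allocation top (== node start)
  uintptr_t limit;                // new allocation limit, >= top + requested
  size_t returned_to_free_list;   // unused tail handed back via Free()
};

// node_start/node_size describe the node handed out by the free list,
// requested is size_in_bytes, and desired_limit plays the role of ComputeLimit().
inline LabSplit SplitFreeListNode(uintptr_t node_start, size_t node_size,
                                  size_t requested, uintptr_t desired_limit) {
  assert(requested <= node_size);
  const uintptr_t end = node_start + node_size;
  uintptr_t limit = desired_limit;
  if (limit < node_start + requested) limit = node_start + requested;
  if (limit > end) limit = end;
  return {node_start, limit, static_cast<size_t>(end - limit)};
}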
    1955             : 
    1956             : #ifdef DEBUG
    1957             : void PagedSpace::Print() {}
    1958             : #endif
    1959             : 
    1960             : #ifdef VERIFY_HEAP
    1961             : void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
    1962             :   bool allocation_pointer_found_in_space =
    1963             :       (allocation_info_.top() == allocation_info_.limit());
    1964             :   size_t external_space_bytes[kNumTypes];
    1965             :   size_t external_page_bytes[kNumTypes];
    1966             : 
    1967             :   for (int i = 0; i < kNumTypes; i++) {
    1968             :     external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    1969             :   }
    1970             : 
    1971             :   for (Page* page : *this) {
    1972             :     CHECK(page->owner() == this);
    1973             : 
    1974             :     for (int i = 0; i < kNumTypes; i++) {
    1975             :       external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    1976             :     }
    1977             : 
    1978             :     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
    1979             :       allocation_pointer_found_in_space = true;
    1980             :     }
    1981             :     CHECK(page->SweepingDone());
    1982             :     HeapObjectIterator it(page);
    1983             :     Address end_of_previous_object = page->area_start();
    1984             :     Address top = page->area_end();
    1985             : 
    1986             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    1987             :       CHECK(end_of_previous_object <= object->address());
    1988             : 
    1989             :       // The first word should be a map, and we expect all map pointers to
    1990             :       // be in map space or read-only space.
    1991             :       Map map = object->map();
    1992             :       CHECK(map->IsMap());
    1993             :       CHECK(heap()->map_space()->Contains(map) ||
    1994             :             heap()->read_only_space()->Contains(map));
    1995             : 
    1996             :       // Perform space-specific object verification.
    1997             :       VerifyObject(object);
    1998             : 
    1999             :       // The object itself should look OK.
    2000             :       object->ObjectVerify(isolate);
    2001             : 
    2002             :       if (!FLAG_verify_heap_skip_remembered_set) {
    2003             :         heap()->VerifyRememberedSetFor(object);
    2004             :       }
    2005             : 
    2006             :       // All the interior pointers should be contained in the heap.
    2007             :       int size = object->Size();
    2008             :       object->IterateBody(map, size, visitor);
    2009             :       CHECK(object->address() + size <= top);
    2010             :       end_of_previous_object = object->address() + size;
    2011             : 
    2012             :       if (object->IsExternalString()) {
    2013             :         ExternalString external_string = ExternalString::cast(object);
    2014             :         size_t size = external_string->ExternalPayloadSize();
    2015             :         external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
    2016             :       } else if (object->IsJSArrayBuffer()) {
    2017             :         JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
    2018             :         if (ArrayBufferTracker::IsTracked(array_buffer)) {
    2019             :           size_t size = array_buffer->byte_length();
    2020             :           external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
    2021             :         }
    2022             :       }
    2023             :     }
    2024             :     for (int i = 0; i < kNumTypes; i++) {
    2025             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2026             :       CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
    2027             :       external_space_bytes[t] += external_page_bytes[t];
    2028             :     }
    2029             :   }
    2030             :   for (int i = 0; i < kNumTypes; i++) {
    2031             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2032             :     CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
    2033             :   }
    2034             :   CHECK(allocation_pointer_found_in_space);
    2035             : #ifdef DEBUG
    2036             :   VerifyCountersAfterSweeping();
    2037             : #endif
    2038             : }
    2039             : 
    2040             : void PagedSpace::VerifyLiveBytes() {
    2041             :   IncrementalMarking::MarkingState* marking_state =
    2042             :       heap()->incremental_marking()->marking_state();
    2043             :   for (Page* page : *this) {
    2044             :     CHECK(page->SweepingDone());
    2045             :     HeapObjectIterator it(page);
    2046             :     int black_size = 0;
    2047             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    2048             :       // Only black (live) objects contribute to the black size being verified.
    2049             :       if (marking_state->IsBlack(object)) {
    2050             :         black_size += object->Size();
    2051             :       }
    2052             :     }
    2053             :     CHECK_LE(black_size, marking_state->live_bytes(page));
    2054             :   }
    2055             : }
    2056             : #endif  // VERIFY_HEAP
    2057             : 
    2058             : #ifdef DEBUG
    2059             : void PagedSpace::VerifyCountersAfterSweeping() {
    2060             :   size_t total_capacity = 0;
    2061             :   size_t total_allocated = 0;
    2062             :   for (Page* page : *this) {
    2063             :     DCHECK(page->SweepingDone());
    2064             :     total_capacity += page->area_size();
    2065             :     HeapObjectIterator it(page);
    2066             :     size_t real_allocated = 0;
    2067             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    2068             :       if (!object->IsFiller()) {
    2069             :         real_allocated += object->Size();
    2070             :       }
    2071             :     }
    2072             :     total_allocated += page->allocated_bytes();
    2073             :     // The real size can be smaller than the accounted size if array trimming
    2074             :     // or object slack tracking happened after sweeping.
    2075             :     DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    2076             :     DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
    2077             :   }
    2078             :   DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
    2079             :   DCHECK_EQ(total_allocated, accounting_stats_.Size());
    2080             : }
    2081             : 
    2082             : void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
    2083             :   // We need to refine the counters on pages that are already swept and have
    2084             :   // not been moved over to the actual space. Otherwise, the AccountingStats
    2085             :   // are just an over-approximation.
    2086             :   RefillFreeList();
    2087             : 
    2088             :   size_t total_capacity = 0;
    2089             :   size_t total_allocated = 0;
    2090             :   auto marking_state =
    2091             :       heap()->incremental_marking()->non_atomic_marking_state();
    2092             :   for (Page* page : *this) {
    2093             :     size_t page_allocated =
    2094             :         page->SweepingDone()
    2095             :             ? page->allocated_bytes()
    2096             :             : static_cast<size_t>(marking_state->live_bytes(page));
    2097             :     total_capacity += page->area_size();
    2098             :     total_allocated += page_allocated;
    2099             :     DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
    2100             :   }
    2101             :   DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
    2102             :   DCHECK_EQ(total_allocated, accounting_stats_.Size());
    2103             : }
    2104             : #endif
    2105             : 
    2106             : // -----------------------------------------------------------------------------
    2107             : // NewSpace implementation
    2108             : 
    2109       61539 : NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
    2110             :                    size_t initial_semispace_capacity,
    2111             :                    size_t max_semispace_capacity)
    2112             :     : SpaceWithLinearArea(heap, NEW_SPACE),
    2113             :       to_space_(heap, kToSpace),
    2114       61539 :       from_space_(heap, kFromSpace) {
    2115             :   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
    2116             :   DCHECK(
    2117             :       base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
    2118             : 
    2119             :   to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
    2120             :   from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
    2121       61539 :   if (!to_space_.Commit()) {
    2122           0 :     V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
    2123             :   }
    2124             :   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
    2125       61539 :   ResetLinearAllocationArea();
    2126       61539 : }
    2127             : 
    2128       61528 : void NewSpace::TearDown() {
    2129             :   allocation_info_.Reset(kNullAddress, kNullAddress);
    2130             : 
    2131       61528 :   to_space_.TearDown();
    2132       61529 :   from_space_.TearDown();
    2133       61529 : }
    2134             : 
    2135       94928 : void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
    2136             : 
    2137             : 
    2138        1789 : void NewSpace::Grow() {
    2139             :   // Double the semispace size but only up to maximum capacity.
    2140             :   DCHECK(TotalCapacity() < MaximumCapacity());
    2141             :   size_t new_capacity =
    2142        1789 :       Min(MaximumCapacity(),
    2143             :           static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
    2144        1789 :   if (to_space_.GrowTo(new_capacity)) {
    2145             :     // Only grow from space if we managed to grow to-space.
    2146        1789 :     if (!from_space_.GrowTo(new_capacity)) {
    2147             :       // If we managed to grow to-space but couldn't grow from-space,
    2148             :       // attempt to shrink to-space.
    2149           0 :       if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
    2150             :         // We are in an inconsistent state because we could not
    2151             :         // commit/uncommit memory from new space.
    2152           0 :         FATAL("inconsistent state");
    2153             :       }
    2154             :     }
    2155             :   }
    2156             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2157        1789 : }
    2158             : 
    2159             : 
    2160       26318 : void NewSpace::Shrink() {
    2161       26318 :   size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
    2162             :   size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
    2163       26521 :   if (rounded_new_capacity < TotalCapacity() &&
    2164         203 :       to_space_.ShrinkTo(rounded_new_capacity)) {
    2165             :     // Only shrink from-space if we managed to shrink to-space.
    2166             :     from_space_.Reset();
    2167         203 :     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
    2168             :       // If we managed to shrink to-space but couldn't shrink from-space,
    2169             :       // attempt to grow to-space again.
    2170           0 :       if (!to_space_.GrowTo(from_space_.current_capacity())) {
    2171             :         // We are in an inconsistent state because we could not
    2172             :         // commit/uncommit memory from new space.
    2173           0 :         FATAL("inconsistent state");
    2174             :       }
    2175             :     }
    2176             :   }
    2177             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2178       26318 : }
    2179             : 
    2180       73955 : bool NewSpace::Rebalance() {
    2181             :   // Order here is important to make use of the page pool.
    2182      147910 :   return to_space_.EnsureCurrentCapacity() &&
    2183      147910 :          from_space_.EnsureCurrentCapacity();
    2184             : }
    2185             : 
    2186      147910 : bool SemiSpace::EnsureCurrentCapacity() {
    2187      147910 :   if (is_committed()) {
    2188             :     const int expected_pages =
    2189      147910 :         static_cast<int>(current_capacity_ / Page::kPageSize);
    2190             :     MemoryChunk* current_page = first_page();
    2191             :     int actual_pages = 0;
    2192             : 
    2193             :     // First iterate through the pages list until we have seen expected_pages
    2194             :     // pages, if that many pages exist.
    2195     1582724 :     while (current_page != nullptr && actual_pages < expected_pages) {
    2196      717407 :       actual_pages++;
    2197             :       current_page = current_page->list_node().next();
    2198             :     }
    2199             : 
    2200             :     // Free all overallocated pages which are behind current_page.
    2201        1544 :     while (current_page) {
    2202             :       MemoryChunk* next_current = current_page->list_node().next();
    2203             :       memory_chunk_list_.Remove(current_page);
    2204             :       // Clear new space flags to avoid this page being treated as a new
    2205             :       // space page that is potentially being swept.
    2206             :       current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
    2207             :       heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
    2208        1544 :           current_page);
    2209             :       current_page = next_current;
    2210             :     }
    2211             : 
    2212             :     // Add more pages if we have fewer than expected_pages.
    2213             :     IncrementalMarking::NonAtomicMarkingState* marking_state =
    2214             :         heap()->incremental_marking()->non_atomic_marking_state();
    2215        2481 :     while (actual_pages < expected_pages) {
    2216        2481 :       actual_pages++;
    2217             :       current_page =
    2218             :           heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2219             :               MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2220        2481 :               NOT_EXECUTABLE);
    2221        2481 :       if (current_page == nullptr) return false;
    2222             :       DCHECK_NOT_NULL(current_page);
    2223             :       memory_chunk_list_.PushBack(current_page);
    2224             :       marking_state->ClearLiveness(current_page);
    2225             :       current_page->SetFlags(first_page()->GetFlags(),
    2226             :                              static_cast<uintptr_t>(Page::kCopyAllFlags));
    2227             :       heap()->CreateFillerObjectAt(current_page->area_start(),
    2228             :                                    static_cast<int>(current_page->area_size()),
    2229        2481 :                                    ClearRecordedSlots::kNo);
    2230             :     }
    2231             :   }
    2232             :   return true;
    2233             : }
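// [Editor's note] A rough sketch of the reconcile loop above: walk the page
// list up to the expected count, drop any surplus pages behind that point,
// then append fresh pages until the count matches. EnsurePageCount and
// make_page are illustrative stand-ins (the real code also clears liveness
// data, copies page flags and installs a filler object).

#include <cstddef>
#include <list>

template <typename MakePage>
bool EnsurePageCount(std::list<int>* pages, size_t expected, MakePage make_page) {
  auto it = pages->begin();
  size_t actual = 0;
  while (it != pages->end() && actual < expected) {  // keep the first `expected`
    ++it;
    ++actual;
  }
  pages->erase(it, pages->end());  // free everything that is overallocated
  while (actual < expected) {      // add whatever is still missing
    int page;
    if (!make_page(&page)) return false;  // mirrors the AllocatePage failure path
    pages->push_back(page);
    ++actual;
  }
  return true;
}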
    2234             : 
    2235      891704 : LinearAllocationArea LocalAllocationBuffer::Close() {
    2236      891704 :   if (IsValid()) {
    2237       93928 :     heap_->CreateFillerObjectAt(
    2238             :         allocation_info_.top(),
    2239             :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    2240      187856 :         ClearRecordedSlots::kNo);
    2241       93929 :     const LinearAllocationArea old_info = allocation_info_;
    2242       93929 :     allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    2243       93929 :     return old_info;
    2244             :   }
    2245      797776 :   return LinearAllocationArea(kNullAddress, kNullAddress);
    2246             : }
    2247             : 
    2248      336301 : LocalAllocationBuffer::LocalAllocationBuffer(
    2249             :     Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
    2250             :     : heap_(heap),
    2251      336301 :       allocation_info_(allocation_info) {
    2252      336301 :   if (IsValid()) {
    2253             :     heap_->CreateFillerObjectAt(
    2254             :         allocation_info_.top(),
    2255             :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    2256      147921 :         ClearRecordedSlots::kNo);
    2257             :   }
    2258      336296 : }
    2259             : 
    2260      148047 : LocalAllocationBuffer::LocalAllocationBuffer(const LocalAllocationBuffer& other)
    2261             :     V8_NOEXCEPT {
    2262             :   *this = other;
    2263      148042 : }
    2264             : 
    2265      148296 : LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    2266             :     const LocalAllocationBuffer& other) V8_NOEXCEPT {
    2267      296343 :   Close();
    2268      296338 :   heap_ = other.heap_;
    2269      296338 :   allocation_info_ = other.allocation_info_;
    2270             : 
    2271             :   // This is needed since we (a) cannot yet use move-semantics, (b) want to
    2272             :   // make the class easy to use as a value type, and (c) implicitly call
    2273             :   // {Close} upon copy.
    2274             :   const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
    2275             :       kNullAddress, kNullAddress);
    2276      148296 :   return *this;
    2277             : }
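// [Editor's note] The copy constructor and assignment operator above give the
// buffer transfer-on-copy semantics: copying first closes the destination,
// then adopts the source's allocation area and empties the source, so at most
// one LocalAllocationBuffer owns a given area. A stripped-down sketch of that
// pattern with an ordinary class (TransferOnCopy is an illustrative name):

class TransferOnCopy {
 public:
  TransferOnCopy() = default;
  explicit TransferOnCopy(int area) : area_(area) {}
  TransferOnCopy(const TransferOnCopy& other) { *this = other; }
  TransferOnCopy& operator=(const TransferOnCopy& other) {
    Close();                                        // release whatever we held
    area_ = other.area_;                            // adopt the other's area
    const_cast<TransferOnCopy&>(other).area_ = 0;   // and leave it empty
    return *this;
  }
  bool IsValid() const { return area_ != 0; }
  void Close() { area_ = 0; }

 private:
  int area_ = 0;
};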
    2278             : 
    2279      254478 : void NewSpace::UpdateLinearAllocationArea() {
    2280             :   // Make sure there are no unaccounted allocations.
    2281             :   DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
    2282             : 
    2283             :   Address new_top = to_space_.page_low();
    2284      254478 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2285             :   allocation_info_.Reset(new_top, to_space_.page_high());
    2286             :   // The order of the following two stores is important.
    2287             :   // See the corresponding loads in ConcurrentMarking::Run.
    2288             :   original_limit_.store(limit(), std::memory_order_relaxed);
    2289             :   original_top_.store(top(), std::memory_order_release);
    2290      254478 :   StartNextInlineAllocationStep();
    2291             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2292      254478 : }
    2293             : 
    2294      156467 : void NewSpace::ResetLinearAllocationArea() {
    2295             :   // Do a step to account for memory allocated so far before resetting.
    2296      156467 :   InlineAllocationStep(top(), top(), kNullAddress, 0);
    2297             :   to_space_.Reset();
    2298      156467 :   UpdateLinearAllocationArea();
    2299             :   // Clear all mark-bits in the to-space.
    2300             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2301             :       heap()->incremental_marking()->non_atomic_marking_state();
    2302      915404 :   for (Page* p : to_space_) {
    2303             :     marking_state->ClearLiveness(p);
    2304             :     // Concurrent marking may have local live bytes for this page.
    2305      758937 :     heap()->concurrent_marking()->ClearMemoryChunkData(p);
    2306             :   }
    2307      156467 : }
    2308             : 
    2309      652431 : void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
    2310      652431 :   Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
    2311             :   allocation_info_.set_limit(new_limit);
    2312             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2313      652431 : }
    2314             : 
    2315    22123687 : void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
    2316    22123687 :   Address new_limit = ComputeLimit(top(), limit(), min_size);
    2317             :   DCHECK_LE(new_limit, limit());
    2318    22123660 :   DecreaseLimit(new_limit);
    2319    22123650 : }
    2320             : 
    2321      113524 : bool NewSpace::AddFreshPage() {
    2322             :   Address top = allocation_info_.top();
    2323             :   DCHECK(!OldSpace::IsAtPageStart(top));
    2324             : 
    2325             :   // Do a step to account for memory allocated on previous page.
    2326      113524 :   InlineAllocationStep(top, top, kNullAddress, 0);
    2327             : 
    2328      113524 :   if (!to_space_.AdvancePage()) {
    2329             :     // No more pages left to advance.
    2330             :     return false;
    2331             :   }
    2332             : 
    2333             :   // Clear remainder of current page.
    2334             :   Address limit = Page::FromAllocationAreaAddress(top)->area_end();
    2335       98011 :   int remaining_in_page = static_cast<int>(limit - top);
    2336       98011 :   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
    2337       98011 :   UpdateLinearAllocationArea();
    2338             : 
    2339       98011 :   return true;
    2340             : }
    2341             : 
    2342             : 
    2343           0 : bool NewSpace::AddFreshPageSynchronized() {
    2344           0 :   base::MutexGuard guard(&mutex_);
    2345           0 :   return AddFreshPage();
    2346             : }
    2347             : 
    2348             : 
    2349      350504 : bool NewSpace::EnsureAllocation(int size_in_bytes,
    2350             :                                 AllocationAlignment alignment) {
    2351             :   Address old_top = allocation_info_.top();
    2352             :   Address high = to_space_.page_high();
    2353      350504 :   int filler_size = Heap::GetFillToAlign(old_top, alignment);
    2354      350504 :   int aligned_size_in_bytes = size_in_bytes + filler_size;
    2355             : 
    2356      350504 :   if (old_top + aligned_size_in_bytes > high) {
    2357             :     // Not enough room in the page; try to allocate a new one.
    2358      112546 :     if (!AddFreshPage()) {
    2359             :       return false;
    2360             :     }
    2361             : 
    2362             :     old_top = allocation_info_.top();
    2363             :     high = to_space_.page_high();
    2364       97090 :     filler_size = Heap::GetFillToAlign(old_top, alignment);
    2365             :   }
    2366             : 
    2367             :   DCHECK(old_top + aligned_size_in_bytes <= high);
    2368             : 
    2369      335048 :   if (allocation_info_.limit() < high) {
    2370             :     // Either the limit has been lowered because linear allocation was disabled
    2371             :     // or because incremental marking wants to get a chance to do a step,
    2372             :     // or because the idle scavenge job wants to get a chance to post a task.
    2373             :     // Set the new limit accordingly.
    2374      259173 :     Address new_top = old_top + aligned_size_in_bytes;
    2375      259173 :     Address soon_object = old_top + filler_size;
    2376      259173 :     InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
    2377      259173 :     UpdateInlineAllocationLimit(aligned_size_in_bytes);
    2378             :   }
    2379             :   return true;
    2380             : }
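// [Editor's note] EnsureAllocation() above reserves size_in_bytes plus a
// filler that pads old_top up to the requested alignment. For a power-of-two
// byte alignment the filler is simply the distance to the next aligned
// address; a sketch of that arithmetic follows (V8's Heap::GetFillToAlign
// works on an AllocationAlignment enum rather than a raw byte count):

#include <cstddef>
#include <cstdint>

// `alignment` must be a power of two (1 means no extra alignment).
inline size_t FillToAlign(uintptr_t top, size_t alignment) {
  const size_t mask = alignment - 1;
  return (alignment - (top & mask)) & mask;
}

// Example: top == 0x1004 and alignment == 8 give a 4-byte filler, so the
// object itself starts at the aligned address 0x1008.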
    2381             : 
    2382       95548 : size_t LargeObjectSpace::Available() {
    2383             :   // We return zero here since we cannot take advantage of already allocated
    2384             :   // large object memory.
    2385       95548 :   return 0;
    2386             : }
    2387             : 
    2388   125750724 : void SpaceWithLinearArea::StartNextInlineAllocationStep() {
    2389   125750724 :   if (heap()->allocation_step_in_progress()) {
    2390             :     // If we are mid-way through an existing step, don't start a new one.
    2391             :     return;
    2392             :   }
    2393             : 
    2394   125750767 :   if (AllocationObserversActive()) {
    2395    22015724 :     top_on_previous_step_ = top();
    2396    22015724 :     UpdateInlineAllocationLimit(0);
    2397             :   } else {
    2398             :     DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2399             :   }
    2400             : }
    2401             : 
    2402      206083 : void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
    2403      206083 :   InlineAllocationStep(top(), top(), kNullAddress, 0);
    2404      206083 :   Space::AddAllocationObserver(observer);
    2405             :   DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
    2406      206083 : }
    2407             : 
    2408      185898 : void SpaceWithLinearArea::RemoveAllocationObserver(
    2409             :     AllocationObserver* observer) {
    2410             :   Address top_for_next_step =
    2411      185898 :       allocation_observers_.size() == 1 ? kNullAddress : top();
    2412      185898 :   InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
    2413      185897 :   Space::RemoveAllocationObserver(observer);
    2414             :   DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
    2415      185897 : }
    2416             : 
    2417      484840 : void SpaceWithLinearArea::PauseAllocationObservers() {
    2418             :   // Do a step to account for memory allocated so far.
    2419      484840 :   InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
    2420             :   Space::PauseAllocationObservers();
    2421             :   DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2422      484840 :   UpdateInlineAllocationLimit(0);
    2423      484840 : }
    2424             : 
    2425      484840 : void SpaceWithLinearArea::ResumeAllocationObservers() {
    2426             :   DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2427             :   Space::ResumeAllocationObservers();
    2428      484840 :   StartNextInlineAllocationStep();
    2429      484840 : }
    2430             : 
    2431     2562509 : void SpaceWithLinearArea::InlineAllocationStep(Address top,
    2432             :                                                Address top_for_next_step,
    2433             :                                                Address soon_object,
    2434             :                                                size_t size) {
    2435     2562509 :   if (heap()->allocation_step_in_progress()) {
    2436             :     // Avoid starting a new step if we are mid-way through an existing one.
    2437             :     return;
    2438             :   }
    2439             : 
    2440     2562509 :   if (top_on_previous_step_) {
    2441      729623 :     if (top < top_on_previous_step_) {
    2442             :       // Generated code decreased the top pointer to do folded allocations.
    2443             :       DCHECK_NE(top, kNullAddress);
    2444             :       DCHECK_EQ(Page::FromAllocationAreaAddress(top),
    2445             :                 Page::FromAllocationAreaAddress(top_on_previous_step_));
    2446          46 :       top_on_previous_step_ = top;
    2447             :     }
    2448      729623 :     int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
    2449      729623 :     AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
    2450      729623 :     top_on_previous_step_ = top_for_next_step;
    2451             :   }
    2452             : }
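// [Editor's note] A compact sketch of the step accounting above: the bytes
// reported are top - top_on_previous_step_, after pulling the previous-step
// mark down when generated code lowered top for a folded allocation, and the
// mark is then re-armed for the next step. StepTracker is an illustrative
// stand-in for the bookkeeping, not a V8 type.

#include <cstddef>
#include <cstdint>

struct StepTracker {
  uintptr_t top_on_previous_step = 0;  // 0 <=> no observers active

  // Returns the byte delta to report and arms the next step.
  size_t Step(uintptr_t top, uintptr_t top_for_next_step) {
    if (top_on_previous_step == 0) return 0;
    if (top < top_on_previous_step) top_on_previous_step = top;  // folded alloc
    const size_t bytes = static_cast<size_t>(top - top_on_previous_step);
    top_on_previous_step = top_for_next_step;
    return bytes;
  }
};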
    2453             : 
    2454        7820 : std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
    2455        7820 :   return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
    2456             : }
    2457             : 
    2458             : #ifdef VERIFY_HEAP
    2459             : // We do not use the SemiSpaceIterator because verification doesn't assume
    2460             : // that it works (it depends on the invariants we are checking).
    2461             : void NewSpace::Verify(Isolate* isolate) {
    2462             :   // The allocation pointer should be in the space or at the very end.
    2463             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2464             : 
    2465             :   // There should be objects packed in from the low address up to the
    2466             :   // allocation pointer.
    2467             :   Address current = to_space_.first_page()->area_start();
    2468             :   CHECK_EQ(current, to_space_.space_start());
    2469             : 
    2470             :   size_t external_space_bytes[kNumTypes];
    2471             :   for (int i = 0; i < kNumTypes; i++) {
    2472             :     external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    2473             :   }
    2474             : 
    2475             :   while (current != top()) {
    2476             :     if (!Page::IsAlignedToPageSize(current)) {
    2477             :       // The allocation pointer should not be in the middle of an object.
    2478             :       CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
    2479             :             current < top());
    2480             : 
    2481             :       HeapObject object = HeapObject::FromAddress(current);
    2482             : 
    2483             :       // The first word should be a map, and we expect all map pointers to
    2484             :       // be in map space or read-only space.
    2485             :       Map map = object->map();
    2486             :       CHECK(map->IsMap());
    2487             :       CHECK(heap()->map_space()->Contains(map) ||
    2488             :             heap()->read_only_space()->Contains(map));
    2489             : 
    2490             :       // The object should not be code or a map.
    2491             :       CHECK(!object->IsMap());
    2492             :       CHECK(!object->IsAbstractCode());
    2493             : 
    2494             :       // The object itself should look OK.
    2495             :       object->ObjectVerify(isolate);
    2496             : 
    2497             :       // All the interior pointers should be contained in the heap.
    2498             :       VerifyPointersVisitor visitor(heap());
    2499             :       int size = object->Size();
    2500             :       object->IterateBody(map, size, &visitor);
    2501             : 
    2502             :       if (object->IsExternalString()) {
    2503             :         ExternalString external_string = ExternalString::cast(object);
    2504             :         size_t size = external_string->ExternalPayloadSize();
    2505             :         external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
    2506             :       } else if (object->IsJSArrayBuffer()) {
    2507             :         JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
    2508             :         if (ArrayBufferTracker::IsTracked(array_buffer)) {
    2509             :           size_t size = array_buffer->byte_length();
    2510             :           external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
    2511             :         }
    2512             :       }
    2513             : 
    2514             :       current += size;
    2515             :     } else {
    2516             :       // At end of page, switch to next page.
    2517             :       Page* page = Page::FromAllocationAreaAddress(current)->next_page();
    2518             :       current = page->area_start();
    2519             :     }
    2520             :   }
    2521             : 
    2522             :   for (int i = 0; i < kNumTypes; i++) {
    2523             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2524             :     CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
    2525             :   }
    2526             : 
    2527             :   // Check semi-spaces.
    2528             :   CHECK_EQ(from_space_.id(), kFromSpace);
    2529             :   CHECK_EQ(to_space_.id(), kToSpace);
    2530             :   from_space_.Verify();
    2531             :   to_space_.Verify();
    2532             : }
    2533             : #endif
    2534             : 
    2535             : // -----------------------------------------------------------------------------
    2536             : // SemiSpace implementation
    2537             : 
    2538           0 : void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
    2539             :   DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
    2540      123078 :   minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
    2541      123078 :   current_capacity_ = minimum_capacity_;
    2542      123078 :   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
    2543       61539 :   committed_ = false;
    2544           0 : }
    2545             : 
    2546             : 
    2547           0 : void SemiSpace::TearDown() {
    2548             :   // Properly uncommit memory to keep the allocator counters in sync.
    2549      123057 :   if (is_committed()) {
    2550       74626 :     Uncommit();
    2551             :   }
    2552      123058 :   current_capacity_ = maximum_capacity_ = 0;
    2553           0 : }
    2554             : 
    2555             : 
    2556       99676 : bool SemiSpace::Commit() {
    2557             :   DCHECK(!is_committed());
    2558       99676 :   const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
    2559      901534 :   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    2560             :     Page* new_page =
    2561             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2562             :             MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2563      400929 :             NOT_EXECUTABLE);
    2564      400929 :     if (new_page == nullptr) {
    2565           0 :       if (pages_added) RewindPages(pages_added);
    2566             :       return false;
    2567             :     }
    2568             :     memory_chunk_list_.PushBack(new_page);
    2569             :   }
    2570             :   Reset();
    2571       99676 :   AccountCommitted(current_capacity_);
    2572       99676 :   if (age_mark_ == kNullAddress) {
    2573       77718 :     age_mark_ = first_page()->area_start();
    2574             :   }
    2575       99676 :   committed_ = true;
    2576       99676 :   return true;
    2577             : }
    2578             : 
    2579             : 
    2580       99659 : bool SemiSpace::Uncommit() {
    2581             :   DCHECK(is_committed());
    2582      952935 :   while (!memory_chunk_list_.Empty()) {
    2583             :     MemoryChunk* chunk = memory_chunk_list_.front();
    2584             :     memory_chunk_list_.Remove(chunk);
    2585      426636 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
    2586             :   }
    2587       99661 :   current_page_ = nullptr;
    2588       99661 :   AccountUncommitted(current_capacity_);
    2589       99661 :   committed_ = false;
    2590       99661 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2591       99661 :   return true;
    2592             : }
    2593             : 
    2594             : 
    2595           0 : size_t SemiSpace::CommittedPhysicalMemory() {
    2596         342 :   if (!is_committed()) return 0;
    2597             :   size_t size = 0;
    2598        1710 :   for (Page* p : *this) {
    2599        1368 :     size += p->CommittedPhysicalMemory();
    2600             :   }
    2601             :   return size;
    2602             : }
    2603             : 
    2604        3578 : bool SemiSpace::GrowTo(size_t new_capacity) {
    2605        3578 :   if (!is_committed()) {
    2606         172 :     if (!Commit()) return false;
    2607             :   }
    2608             :   DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
    2609             :   DCHECK_LE(new_capacity, maximum_capacity_);
    2610             :   DCHECK_GT(new_capacity, current_capacity_);
    2611        3578 :   const size_t delta = new_capacity - current_capacity_;
    2612             :   DCHECK(IsAligned(delta, AllocatePageSize()));
    2613        3578 :   const int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2614             :   DCHECK(last_page());
    2615             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2616             :       heap()->incremental_marking()->non_atomic_marking_state();
    2617       61518 :   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    2618             :     Page* new_page =
    2619             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2620             :             MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2621       28970 :             NOT_EXECUTABLE);
    2622       28970 :     if (new_page == nullptr) {
    2623           0 :       if (pages_added) RewindPages(pages_added);
    2624             :       return false;
    2625             :     }
    2626             :     memory_chunk_list_.PushBack(new_page);
    2627             :     marking_state->ClearLiveness(new_page);
    2628             :     // Duplicate the flags that were set on the old page.
    2629             :     new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
    2630             :   }
    2631             :   AccountCommitted(delta);
    2632        3578 :   current_capacity_ = new_capacity;
    2633        3578 :   return true;
    2634             : }
    2635             : 
    2636         406 : void SemiSpace::RewindPages(int num_pages) {
    2637             :   DCHECK_GT(num_pages, 0);
    2638             :   DCHECK(last_page());
    2639        6802 :   while (num_pages > 0) {
    2640             :     MemoryChunk* last = last_page();
    2641             :     memory_chunk_list_.Remove(last);
    2642        3198 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
    2643        3198 :     num_pages--;
    2644             :   }
    2645         406 : }
    2646             : 
    2647         406 : bool SemiSpace::ShrinkTo(size_t new_capacity) {
    2648             :   DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
    2649             :   DCHECK_GE(new_capacity, minimum_capacity_);
    2650             :   DCHECK_LT(new_capacity, current_capacity_);
    2651         406 :   if (is_committed()) {
    2652         406 :     const size_t delta = current_capacity_ - new_capacity;
    2653             :     DCHECK(IsAligned(delta, Page::kPageSize));
    2654         406 :     int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2655         406 :     RewindPages(delta_pages);
    2656             :     AccountUncommitted(delta);
    2657         406 :     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2658             :   }
    2659         406 :   current_capacity_ = new_capacity;
    2660         406 :   return true;
    2661             : }
    2662             : 
    2663      189856 : void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
    2664     1213584 :   for (Page* page : *this) {
    2665     1023728 :     page->set_owner(this);
    2666     1023728 :     page->SetFlags(flags, mask);
    2667     1023728 :     if (id_ == kToSpace) {
    2668             :       page->ClearFlag(MemoryChunk::FROM_PAGE);
    2669             :       page->SetFlag(MemoryChunk::TO_PAGE);
    2670             :       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2671             :       heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
    2672             :           page, 0);
    2673             :     } else {
    2674             :       page->SetFlag(MemoryChunk::FROM_PAGE);
    2675             :       page->ClearFlag(MemoryChunk::TO_PAGE);
    2676             :     }
    2677             :     DCHECK(page->InYoungGeneration());
    2678             :   }
    2679      189856 : }
    2680             : 
    2681             : 
    2682           0 : void SemiSpace::Reset() {
    2683             :   DCHECK(first_page());
    2684             :   DCHECK(last_page());
    2685      256346 :   current_page_ = first_page();
    2686      256346 :   pages_used_ = 0;
    2687           0 : }
    2688             : 
    2689        2481 : void SemiSpace::RemovePage(Page* page) {
    2690        2481 :   if (current_page_ == page) {
    2691         190 :     if (page->prev_page()) {
    2692         185 :       current_page_ = page->prev_page();
    2693             :     }
    2694             :   }
    2695             :   memory_chunk_list_.Remove(page);
    2696       12405 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    2697        4962 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2698        4962 :     DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    2699             :   }
    2700        2481 : }
    2701             : 
    2702        1544 : void SemiSpace::PrependPage(Page* page) {
    2703             :   page->SetFlags(current_page()->GetFlags(),
    2704             :                  static_cast<uintptr_t>(Page::kCopyAllFlags));
    2705        1544 :   page->set_owner(this);
    2706             :   memory_chunk_list_.PushFront(page);
    2707        1544 :   pages_used_++;
    2708        7720 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    2709        3088 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2710        3088 :     IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    2711             :   }
    2712        1544 : }
    2713             : 
    2714       94928 : void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
    2715             :   // We won't be swapping semispaces without data in them.
    2716             :   DCHECK(from->first_page());
    2717             :   DCHECK(to->first_page());
    2718             : 
    2719       94928 :   intptr_t saved_to_space_flags = to->current_page()->GetFlags();
    2720             : 
    2721             :   // We swap all properties but id_.
    2722             :   std::swap(from->current_capacity_, to->current_capacity_);
    2723             :   std::swap(from->maximum_capacity_, to->maximum_capacity_);
    2724             :   std::swap(from->minimum_capacity_, to->minimum_capacity_);
    2725             :   std::swap(from->age_mark_, to->age_mark_);
    2726             :   std::swap(from->committed_, to->committed_);
    2727             :   std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
    2728             :   std::swap(from->current_page_, to->current_page_);
    2729             :   std::swap(from->external_backing_store_bytes_,
    2730             :             to->external_backing_store_bytes_);
    2731             : 
    2732       94928 :   to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
    2733       94928 :   from->FixPagesFlags(0, 0);
    2734       94928 : }
    2735             : 
    2736       94928 : void SemiSpace::set_age_mark(Address mark) {
    2737             :   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
    2738       94928 :   age_mark_ = mark;
    2739             :   // Mark all pages up to the one containing mark.
    2740      201032 :   for (Page* p : PageRange(space_start(), mark)) {
    2741             :     p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2742             :   }
    2743       94928 : }
    2744             : 
    2745           0 : std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
    2746             :   // Use NewSpace::GetObjectIterator() (which returns a SemiSpaceIterator) to
    2747           0 :   UNREACHABLE();
    2748             : }
    2749             : 
    2750             : #ifdef DEBUG
    2751             : void SemiSpace::Print() {}
    2752             : #endif
    2753             : 
    2754             : #ifdef VERIFY_HEAP
    2755             : void SemiSpace::Verify() {
    2756             :   bool is_from_space = (id_ == kFromSpace);
    2757             :   size_t external_backing_store_bytes[kNumTypes];
    2758             : 
    2759             :   for (int i = 0; i < kNumTypes; i++) {
    2760             :     external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    2761             :   }
    2762             : 
    2763             :   for (Page* page : *this) {
    2764             :     CHECK_EQ(page->owner(), this);
    2765             :     CHECK(page->InNewSpace());
    2766             :     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
    2767             :                                         : MemoryChunk::TO_PAGE));
    2768             :     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
    2769             :                                          : MemoryChunk::FROM_PAGE));
    2770             :     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    2771             :     if (!is_from_space) {
    2772             :       // The pointers-from-here-are-interesting flag isn't updated dynamically
    2773             :       // on from-space pages, so it might be out of sync with the marking state.
    2774             :       if (page->heap()->incremental_marking()->IsMarking()) {
    2775             :         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2776             :       } else {
    2777             :         CHECK(
    2778             :             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2779             :       }
    2780             :     }
    2781             :     for (int i = 0; i < kNumTypes; i++) {
    2782             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2783             :       external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
    2784             :     }
    2785             : 
    2786             :     CHECK_IMPLIES(page->list_node().prev(),
    2787             :                   page->list_node().prev()->list_node().next() == page);
    2788             :   }
    2789             :   for (int i = 0; i < kNumTypes; i++) {
    2790             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2791             :     CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
    2792             :   }
    2793             : }
    2794             : #endif
    2795             : 
    2796             : #ifdef DEBUG
    2797             : void SemiSpace::AssertValidRange(Address start, Address end) {
    2798             :   // Addresses must belong to the same semi-space.
    2799             :   Page* page = Page::FromAllocationAreaAddress(start);
    2800             :   Page* end_page = Page::FromAllocationAreaAddress(end);
    2801             :   SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
    2802             :   DCHECK_EQ(space, end_page->owner());
    2803             :   // The start address must be before the end address, either on the same
    2804             :   // page, or the end address is on a later page in the linked list of
    2805             :   // semi-space pages.
    2806             :   if (page == end_page) {
    2807             :     DCHECK_LE(start, end);
    2808             :   } else {
    2809             :     while (page != end_page) {
    2810             :       page = page->next_page();
    2811             :     }
    2812             :     DCHECK(page);
    2813             :   }
    2814             : }
    2815             : #endif
    2816             : 
    2817             : 
    2818             : // -----------------------------------------------------------------------------
    2819             : // SemiSpaceIterator implementation.
    2820             : 
    2821        7820 : SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
    2822             :   Initialize(space->first_allocatable_address(), space->top());
    2823           0 : }
    2824             : 
    2825             : 
    2826           0 : void SemiSpaceIterator::Initialize(Address start, Address end) {
    2827             :   SemiSpace::AssertValidRange(start, end);
    2828        7820 :   current_ = start;
    2829        7820 :   limit_ = end;
    2830           0 : }
    2831             : 
    2832         251 : size_t NewSpace::CommittedPhysicalMemory() {
    2833         251 :   if (!base::OS::HasLazyCommits()) return CommittedMemory();
    2834         251 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2835             :   size_t size = to_space_.CommittedPhysicalMemory();
    2836         251 :   if (from_space_.is_committed()) {
    2837          91 :     size += from_space_.CommittedPhysicalMemory();
    2838             :   }
    2839             :   return size;
    2840             : }
    2841             : 
    2842             : 
    2843             : // -----------------------------------------------------------------------------
    2844             : // Free-list implementation for the old object spaces
    2845             : 
    2846             : 
    2847           0 : void FreeListCategory::Reset() {
    2848             :   set_top(FreeSpace());
    2849             :   set_prev(nullptr);
    2850             :   set_next(nullptr);
    2851     2256845 :   available_ = 0;
    2852           0 : }
    2853             : 
    2854      731594 : FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
    2855             :                                              size_t* node_size) {
    2856             :   DCHECK(page()->CanAllocate());
    2857             :   FreeSpace node = top();
    2858     1391091 :   if (node.is_null() || static_cast<size_t>(node->Size()) < minimum_size) {
    2859      123713 :     *node_size = 0;
    2860      123713 :     return FreeSpace();
    2861             :   }
    2862             :   set_top(node->next());
    2863      607881 :   *node_size = node->Size();
    2864      607881 :   available_ -= *node_size;
    2865      607881 :   return node;
    2866             : }
    2867             : 
    2868      689512 : FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
    2869             :                                                 size_t* node_size) {
    2870             :   DCHECK(page()->CanAllocate());
    2871             :   FreeSpace prev_non_evac_node;
    2872      691904 :   for (FreeSpace cur_node = top(); !cur_node.is_null();
    2873             :        cur_node = cur_node->next()) {
    2874      666420 :     size_t size = cur_node->size();
    2875      666420 :     if (size >= minimum_size) {
    2876             :       DCHECK_GE(available_, size);
    2877      665224 :       available_ -= size;
    2878      665224 :       if (cur_node == top()) {
    2879             :         set_top(cur_node->next());
    2880             :       }
    2881      665224 :       if (!prev_non_evac_node.is_null()) {
    2882             :         MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
    2883          50 :         if (chunk->owner()->identity() == CODE_SPACE) {
    2884           1 :           chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
    2885             :         }
    2886             :         prev_non_evac_node->set_next(cur_node->next());
    2887             :       }
    2888      665224 :       *node_size = size;
    2889      665224 :       return cur_node;
    2890             :     }
    2891             : 
    2892             :     prev_non_evac_node = cur_node;
    2893             :   }
    2894       24288 :   return FreeSpace();
    2895             : }
    2896             : 
    2897    20693928 : void FreeListCategory::Free(Address start, size_t size_in_bytes,
    2898             :                             FreeMode mode) {
    2899             :   DCHECK(page()->CanAllocate());
    2900             :   FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
    2901             :   free_space->set_next(top());
    2902             :   set_top(free_space);
    2903    20693928 :   available_ += size_in_bytes;
    2904    20693928 :   if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
    2905             :     owner()->AddCategory(this);
    2906             :   }
    2907    20693928 : }
    2908             : 
    2909             : 
    2910       61478 : void FreeListCategory::RepairFreeList(Heap* heap) {
    2911             :   FreeSpace n = top();
    2912       61478 :   while (!n.is_null()) {
    2913             :     MapWordSlot map_location = n.map_slot();
    2914             :     // We can't use .is_null() here because *map_location returns an
    2915             :     // Object (for which "is null" is not defined, as it would be
    2916             :     // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
    2917           0 :     if (map_location.contains_value(kNullAddress)) {
    2918             :       map_location.store(ReadOnlyRoots(heap).free_space_map());
    2919             :     } else {
    2920             :       DCHECK(map_location.contains_value(
    2921             :           ReadOnlyRoots(heap).free_space_map().ptr()));
    2922             :     }
    2923             :     n = n->next();
    2924             :   }
    2925       61478 : }
    2926             : 
    2927           0 : void FreeListCategory::Relink() {
    2928             :   DCHECK(!is_linked());
    2929             :   owner()->AddCategory(this);
    2930           0 : }
    2931             : 
    2932           0 : FreeList::FreeList() : wasted_bytes_(0) {
    2933     6087147 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2934     2809452 :     categories_[i] = nullptr;
    2935             :   }
    2936      468243 :   Reset();
    2937           0 : }
    2938             : 
    2939             : 
    2940      690107 : void FreeList::Reset() {
    2941             :   ForAllFreeListCategories(
    2942             :       [](FreeListCategory* category) { category->Reset(); });
    2943     8971391 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2944     4140642 :     categories_[i] = nullptr;
    2945             :   }
    2946      690107 :   ResetStats();
    2947      690107 : }
    2948             : 
    2949    21116076 : size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
    2950             :   Page* page = Page::FromAddress(start);
    2951             :   page->DecreaseAllocatedBytes(size_in_bytes);
    2952             : 
    2953             :   // Blocks have to be a minimum size to hold free list items.
    2954    21116076 :   if (size_in_bytes < kMinBlockSize) {
    2955             :     page->add_wasted_memory(size_in_bytes);
    2956             :     wasted_bytes_ += size_in_bytes;
    2957      419984 :     return size_in_bytes;
    2958             :   }
    2959             : 
    2960             :   // Insert other blocks at the head of a free list of the appropriate
    2961             :   // magnitude.
    2962             :   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
    2963    20696092 :   page->free_list_category(type)->Free(start, size_in_bytes, mode);
    2964             :   DCHECK_EQ(page->AvailableInFreeList(),
    2965             :             page->AvailableInFreeListFromAllocatedBytes());
    2966    20682105 :   return 0;
    2967             : }
    2968             : 
    2969     2960424 : FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
    2970             :                                size_t* node_size) {
    2971             :   FreeListCategoryIterator it(this, type);
    2972             :   FreeSpace node;
    2973     3024604 :   while (it.HasNext()) {
    2974             :     FreeListCategory* current = it.Next();
    2975      658436 :     node = current->PickNodeFromList(minimum_size, node_size);
    2976      658439 :     if (!node.is_null()) {
    2977             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2978      594259 :       return node;
    2979             :     }
    2980             :     RemoveCategory(current);
    2981             :   }
    2982     2366168 :   return node;
    2983             : }
    2984             : 
    2985           0 : FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
    2986             :                                   size_t minimum_size, size_t* node_size) {
    2987      403651 :   if (categories_[type] == nullptr) return FreeSpace();
    2988       73156 :   FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
    2989             :   if (!node.is_null()) {
    2990             :     DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2991             :   }
    2992       73156 :   return node;
    2993             : }
    2994             : 
    2995     1259349 : FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
    2996             :                                         size_t* node_size,
    2997             :                                         size_t minimum_size) {
    2998             :   FreeListCategoryIterator it(this, type);
    2999             :   FreeSpace node;
    3000     1283637 :   while (it.HasNext()) {
    3001             :     FreeListCategory* current = it.Next();
    3002      689510 :     node = current->SearchForNodeInList(minimum_size, node_size);
    3003      689512 :     if (!node.is_null()) {
    3004             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    3005      665224 :       return node;
    3006             :     }
    3007       24288 :     if (current->is_empty()) {
    3008             :       RemoveCategory(current);
    3009             :     }
    3010             :   }
    3011      594127 :   return node;
    3012             : }
    3013             : 
    3014     1853598 : FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
    3015             :   DCHECK_GE(kMaxBlockSize, size_in_bytes);
    3016             :   FreeSpace node;
    3017             :   // First try the allocation fast path: try to allocate the minimum element
    3018             :   // size of a free list category. This operation is constant time.
    3019             :   FreeListCategoryType type =
    3020             :       SelectFastAllocationFreeListCategoryType(size_in_bytes);
    3021     4814022 :   for (int i = type; i < kHuge && node.is_null(); i++) {
    3022             :     node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
    3023     2960407 :                       node_size);
    3024             :   }
    3025             : 
    3026     1853615 :   if (node.is_null()) {
    3027             :     // Next search the huge list for free list nodes. This takes linear time in
    3028             :     // the number of huge elements.
    3029     1259367 :     node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
    3030             :   }
    3031             : 
    3032     1853596 :   if (node.is_null() && type != kHuge) {
    3033             :     // We didn't find anything in the huge list. Now search the best fitting
    3034             :     // free list for a node that has at least the requested size.
    3035             :     type = SelectFreeListCategoryType(size_in_bytes);
    3036             :     node = TryFindNodeIn(type, size_in_bytes, node_size);
    3037             :   }
    3038             : 
    3039     1853596 :   if (!node.is_null()) {
    3040     1273106 :     Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
    3041             :   }
    3042             : 
    3043             :   DCHECK(IsVeryLong() || Available() == SumFreeLists());
    3044     1853596 :   return node;
    3045             : }
    3046             : 
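// --- Illustrative sketch (not part of spaces.cc, so no coverage data) ------
// The FreeList code above implements a segregated free list: Free() buckets a
// block into a category chosen by its size, and Allocate() first pops from
// categories whose blocks are guaranteed to be large enough (constant time
// per category) before falling back to a linear, first-fit scan of the "huge"
// category. The self-contained sketch below mirrors only that shape; all
// names and the category boundaries are invented for illustration and are not
// V8's real types or limits.
#include <cstddef>

namespace sketch {

struct Block {
  size_t size;
  Block* next;
};

class SegregatedFreeList {
 public:
  static constexpr int kHuge = 4;  // last category holds everything larger

  void Free(Block* block) {
    int type = CategoryFor(block->size);
    block->next = categories_[type];
    categories_[type] = block;
  }

  Block* Allocate(size_t size) {
    // Fast path: every block in a category above CategoryFor(size) is larger
    // than the request, so popping the head is sufficient.
    for (int i = CategoryFor(size) + 1; i < kHuge; i++) {
      if (Block* b = categories_[i]) {
        categories_[i] = b->next;
        return b;
      }
    }
    // Slow path: first-fit scan of the huge category, linear in its length.
    Block** link = &categories_[kHuge];
    for (Block* b = *link; b != nullptr; link = &b->next, b = *link) {
      if (b->size >= size) {
        *link = b->next;
        return b;
      }
    }
    return nullptr;  // The real code additionally retries the exact category.
  }

 private:
  int CategoryFor(size_t size) const {
    // Invented boundaries; V8 derives its own from its size classes.
    constexpr size_t kBoundaries[kHuge] = {32, 256, 2048, 16384};
    for (int i = 0; i < kHuge; i++) {
      if (size <= kBoundaries[i]) return i;
    }
    return kHuge;
  }

  Block* categories_[kHuge + 1] = {};
};

}  // namespace sketch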
    3047      203454 : size_t FreeList::EvictFreeListItems(Page* page) {
    3048      203454 :   size_t sum = 0;
    3049     2441436 :   page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
    3050             :     DCHECK_EQ(this, category->owner());
    3051     1220718 :     sum += category->available();
    3052             :     RemoveCategory(category);
    3053             :     category->Reset();
    3054     1220718 :   });
    3055      203454 :   return sum;
    3056             : }
    3057             : 
    3058           0 : bool FreeList::ContainsPageFreeListItems(Page* page) {
    3059           0 :   bool contained = false;
    3060             :   page->ForAllFreeListCategories(
    3061             :       [this, &contained](FreeListCategory* category) {
    3062           0 :         if (category->owner() == this && category->is_linked()) {
    3063           0 :           contained = true;
    3064             :         }
    3065             :       });
    3066           0 :   return contained;
    3067             : }
    3068             : 
    3069           0 : void FreeList::RepairLists(Heap* heap) {
    3070             :   ForAllFreeListCategories(
    3071      122956 :       [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
    3072           0 : }
    3073             : 
    3074           0 : bool FreeList::AddCategory(FreeListCategory* category) {
    3075     6999767 :   FreeListCategoryType type = category->type_;
    3076             :   DCHECK_LT(type, kNumberOfCategories);
    3077     6999767 :   FreeListCategory* top = categories_[type];
    3078             : 
    3079     6999767 :   if (category->is_empty()) return false;
    3080     2466200 :   if (top == category) return false;
    3081             : 
    3082             :   // Common double-linked list insertion.
    3083     1848542 :   if (top != nullptr) {
    3084             :     top->set_prev(category);
    3085             :   }
    3086             :   category->set_next(top);
    3087     1848542 :   categories_[type] = category;
    3088           0 :   return true;
    3089             : }
    3090             : 
    3091           6 : void FreeList::RemoveCategory(FreeListCategory* category) {
    3092     2077172 :   FreeListCategoryType type = category->type_;
    3093             :   DCHECK_LT(type, kNumberOfCategories);
    3094     2077172 :   FreeListCategory* top = categories_[type];
    3095             : 
    3096             :   // Common doubly-linked list removal.
    3097     2077172 :   if (top == category) {
    3098      435360 :     categories_[type] = category->next();
    3099             :   }
    3100     2077172 :   if (category->prev() != nullptr) {
    3101             :     category->prev()->set_next(category->next());
    3102             :   }
    3103     2077172 :   if (category->next() != nullptr) {
    3104             :     category->next()->set_prev(category->prev());
    3105             :   }
    3106             :   category->set_next(nullptr);
    3107             :   category->set_prev(nullptr);
    3108           6 : }
    3109             : 
    3110           0 : void FreeList::PrintCategories(FreeListCategoryType type) {
    3111             :   FreeListCategoryIterator it(this, type);
    3112             :   PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
    3113           0 :          static_cast<void*>(categories_[type]), type);
    3114           0 :   while (it.HasNext()) {
    3115             :     FreeListCategory* current = it.Next();
    3116           0 :     PrintF("%p -> ", static_cast<void*>(current));
    3117             :   }
    3118           0 :   PrintF("null\n");
    3119           0 : }
    3120             : 
    3121             : 
    3122             : #ifdef DEBUG
    3123             : size_t FreeListCategory::SumFreeList() {
    3124             :   size_t sum = 0;
    3125             :   FreeSpace cur = top();
    3126             :   while (!cur.is_null()) {
    3127             :     // We can't use "cur->map()" here because both cur's map and the
    3128             :     // root can be null during bootstrapping.
    3129             :     DCHECK(cur->map_slot().contains_value(
    3130             :         page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr()));
    3131             :     sum += cur->relaxed_read_size();
    3132             :     cur = cur->next();
    3133             :   }
    3134             :   return sum;
    3135             : }
    3136             : 
    3137             : int FreeListCategory::FreeListLength() {
    3138             :   int length = 0;
    3139             :   FreeSpace cur = top();
    3140             :   while (!cur.is_null()) {
    3141             :     length++;
    3142             :     cur = cur->next();
    3143             :     if (length == kVeryLongFreeList) return length;
    3144             :   }
    3145             :   return length;
    3146             : }
    3147             : 
    3148             : bool FreeList::IsVeryLong() {
    3149             :   int len = 0;
    3150             :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    3151             :     FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
    3152             :     while (it.HasNext()) {
    3153             :       len += it.Next()->FreeListLength();
    3154             :       if (len >= FreeListCategory::kVeryLongFreeList) return true;
    3155             :     }
    3156             :   }
    3157             :   return false;
    3158             : }
    3159             : 
    3160             : 
    3161             : // This can take a very long time because it is linear in the number of entries
    3162             : // on the free list, so it should not be called if FreeListLength returns
    3163             : // kVeryLongFreeList.
    3164             : size_t FreeList::SumFreeLists() {
    3165             :   size_t sum = 0;
    3166             :   ForAllFreeListCategories(
    3167             :       [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
    3168             :   return sum;
    3169             : }
    3170             : #endif
    3171             : 
    3172             : 
    3173             : // -----------------------------------------------------------------------------
    3174             : // OldSpace implementation
    3175             : 
    3176      221865 : void PagedSpace::PrepareForMarkCompact() {
    3177             :   // We don't have a linear allocation area while sweeping.  It will be restored
    3178             :   // on the first allocation after the sweep.
    3179      221865 :   FreeLinearAllocationArea();
    3180             : 
    3181             :   // Clear the free list before a full GC---it will be rebuilt afterward.
    3182      221865 :   free_list_.Reset();
    3183      221865 : }
    3184             : 
    3185    10431135 : size_t PagedSpace::SizeOfObjects() {
    3186    10431135 :   CHECK_GE(limit(), top());
    3187             :   DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
    3188    20862270 :   return Size() - (limit() - top());
    3189             : }
    3190             : 
    3191         213 : bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
    3192             :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3193         213 :   if (collector->sweeping_in_progress()) {
    3194             :     // Wait for the sweeper threads here and complete the sweeping phase.
    3195          15 :     collector->EnsureSweepingCompleted();
    3196             : 
    3197             :     // After waiting for the sweeper threads, there may be new free-list
    3198             :     // entries.
    3199          15 :     return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
    3200             :   }
    3201             :   return false;
    3202             : }
    3203             : 
    3204       11112 : bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
    3205             :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3206       11112 :   if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
    3207           5 :     collector->sweeper()->ParallelSweepSpace(identity(), 0);
    3208           5 :     RefillFreeList();
    3209           5 :     return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
    3210             :   }
    3211             :   return false;
    3212             : }
    3213             : 
    3214     1112423 : bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
    3215             :   VMState<GC> state(heap()->isolate());
    3216             :   RuntimeCallTimerScope runtime_timer(
    3217     1112423 :       heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
    3218     2224852 :   return RawSlowRefillLinearAllocationArea(size_in_bytes);
    3219             : }
    3220             : 
    3221      171953 : bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
    3222      171953 :   return RawSlowRefillLinearAllocationArea(size_in_bytes);
    3223             : }
    3224             : 
    3225     1284375 : bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
    3226             :   // Allocation in this space has failed.
    3227             :   DCHECK_GE(size_in_bytes, 0);
    3228             :   const int kMaxPagesToSweep = 1;
    3229             : 
    3230     1284375 :   if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
    3231             : 
    3232             :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3233             :   // Sweeping is still in progress.
    3234      514223 :   if (collector->sweeping_in_progress()) {
    3235      128861 :     if (FLAG_concurrent_sweeping && !is_local() &&
    3236       37644 :         !collector->sweeper()->AreSweeperTasksRunning()) {
    3237       24154 :       collector->EnsureSweepingCompleted();
    3238             :     }
    3239             : 
    3240             :     // First try to refill the free list; concurrent sweeper threads
    3241             :     // may have freed some objects in the meantime.
    3242       91217 :     RefillFreeList();
    3243             : 
    3244             :     // Retry the free list allocation.
    3245       91258 :     if (RefillLinearAllocationAreaFromFreeList(
    3246             :             static_cast<size_t>(size_in_bytes)))
    3247             :       return true;
    3248             : 
    3249             :     // If sweeping is still in progress, try to sweep pages.
    3250             :     int max_freed = collector->sweeper()->ParallelSweepSpace(
    3251       59194 :         identity(), size_in_bytes, kMaxPagesToSweep);
    3252       59201 :     RefillFreeList();
    3253       59201 :     if (max_freed >= size_in_bytes) {
    3254       39293 :       if (RefillLinearAllocationAreaFromFreeList(
    3255             :               static_cast<size_t>(size_in_bytes)))
    3256             :         return true;
    3257             :     }
    3258             :   }
    3259             : 
    3260      443744 :   if (is_local()) {
    3261             :     // The main thread may have acquired all swept pages. Try to steal from
    3262             :     // it. This can only happen during young generation evacuation.
    3263       37196 :     PagedSpace* main_space = heap()->paged_space(identity());
    3264       37196 :     Page* page = main_space->RemovePageSafe(size_in_bytes);
    3265       37209 :     if (page != nullptr) {
    3266       14686 :       AddPage(page);
    3267       14686 :       if (RefillLinearAllocationAreaFromFreeList(
    3268             :               static_cast<size_t>(size_in_bytes)))
    3269             :         return true;
    3270             :     }
    3271             :   }
    3272             : 
    3273      435356 :   if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
    3274             :     DCHECK((CountTotalPages() > 1) ||
    3275             :            (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    3276             :     return RefillLinearAllocationAreaFromFreeList(
    3277      424035 :         static_cast<size_t>(size_in_bytes));
    3278             :   }
    3279             : 
    3280             :   // If sweeper threads are active, wait for them at that point and steal
    3281             :   // elements from their free lists. Allocation may still fail, which would
    3282             :   // indicate that there is not enough memory for the given allocation.
    3283       11325 :   return SweepAndRetryAllocation(size_in_bytes);
    3284             : }
    3285             : 
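// --- Illustrative sketch (not part of spaces.cc, so no coverage data) ------
// RawSlowRefillLinearAllocationArea above escalates through progressively
// more expensive ways to find memory before giving up. The sketch compresses
// that order into one cascade. Every hook here (try_free_list, sweep_one_page,
// steal_page, expand, sweep_all_and_retry) is a hypothetical stand-in for the
// corresponding call in the real code, not an actual V8 API.
#include <functional>

namespace sketch {

struct SlowRefillHooks {
  std::function<bool(int)> try_free_list;        // RefillLinearAllocationAreaFromFreeList
  std::function<bool()> sweeping_in_progress;
  std::function<void()> refill_from_sweeper;     // RefillFreeList
  std::function<int(int)> sweep_one_page;        // Sweeper::ParallelSweepSpace
  std::function<bool(int)> steal_page;           // RemovePageSafe + AddPage (local spaces only)
  std::function<bool()> expand;                  // ShouldExpand... && Expand()
  std::function<bool(int)> sweep_all_and_retry;  // SweepAndRetryAllocation
};

inline bool SlowRefill(const SlowRefillHooks& h, int bytes, bool is_local) {
  if (h.try_free_list(bytes)) return true;              // 1. plain free-list hit
  if (h.sweeping_in_progress()) {
    h.refill_from_sweeper();                            // 2. take concurrently swept memory
    if (h.try_free_list(bytes)) return true;
    if (h.sweep_one_page(bytes) >= bytes &&             // 3. sweep one more page ourselves
        h.try_free_list(bytes)) {
      return true;
    }
  }
  if (is_local && h.steal_page(bytes) && h.try_free_list(bytes)) {
    return true;                                        // 4. steal a page from the main space
  }
  if (h.expand()) return h.try_free_list(bytes);        // 5. grow the space by one page
  return h.sweep_all_and_retry(bytes);                  // 6. finish sweeping, last retry
}

}  // namespace sketch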
    3286             : // -----------------------------------------------------------------------------
    3287             : // MapSpace implementation
    3288             : 
    3289             : #ifdef VERIFY_HEAP
    3290             : void MapSpace::VerifyObject(HeapObject object) { CHECK(object->IsMap()); }
    3291             : #endif
    3292             : 
    3293       61533 : ReadOnlySpace::ReadOnlySpace(Heap* heap)
    3294             :     : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
    3295      123067 :       is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
    3296       61534 : }
    3297             : 
    3298       61982 : void ReadOnlyPage::MakeHeaderRelocatable() {
    3299       61982 :   if (mutex_ != nullptr) {
    3300             :     // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
    3301       61534 :     delete mutex_;
    3302       61534 :     mutex_ = nullptr;
    3303       61534 :     local_tracker_ = nullptr;
    3304       61534 :     reservation_.Reset();
    3305             :   }
    3306       61982 : }
    3307             : 
    3308           0 : void ReadOnlySpace::Forget() {
    3309           0 :   for (Page* p : *this) {
    3310           0 :     heap()->memory_allocator()->PreFreeMemory(p);
    3311             :   }
    3312           0 : }
    3313             : 
    3314      123949 : void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
    3315             :   MemoryAllocator* memory_allocator = heap()->memory_allocator();
    3316      247898 :   for (Page* p : *this) {
    3317             :     ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
    3318      123949 :     if (access == PageAllocator::kRead) {
    3319       61982 :       page->MakeHeaderRelocatable();
    3320             :     }
    3321             : 
    3322             :     // Read-only pages don't have a valid reservation object, so we get the
    3323             :     // proper page allocator manually.
    3324             :     v8::PageAllocator* page_allocator =
    3325             :         memory_allocator->page_allocator(page->executable());
    3326      123949 :     CHECK(
    3327             :         SetPermissions(page_allocator, page->address(), page->size(), access));
    3328             :   }
    3329      123949 : }
    3330             : 
    3331             : // After we have booted, we have created a map that represents free space
    3332             : // on the heap. If there was already a free list, then the elements on it
    3333             : // were created with the wrong FreeSpaceMap (normally nullptr), so we need to
    3334             : // fix them.
    3335       61478 : void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
    3336       61478 :   free_list_.RepairLists(heap());
    3337             :   // Each page may have a small free space that is not tracked by a free list.
    3338             :   // Those free spaces still contain null as their map pointer.
    3339             :   // Overwrite them with new fillers.
    3340      122956 :   for (Page* page : *this) {
    3341       61478 :     int size = static_cast<int>(page->wasted_memory());
    3342       61478 :     if (size == 0) {
    3343             :       // If there is no wasted memory then all free space is in the free list.
    3344             :       continue;
    3345             :     }
    3346           0 :     Address start = page->HighWaterMark();
    3347             :     Address end = page->area_end();
    3348           0 :     if (start < end - size) {
    3349             :       // A region at the high watermark is already in the free list.
    3350           0 :       HeapObject filler = HeapObject::FromAddress(start);
    3351           0 :       CHECK(filler->IsFiller());
    3352           0 :       start += filler->Size();
    3353             :     }
    3354           0 :     CHECK_EQ(size, static_cast<int>(end - start));
    3355           0 :     heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
    3356             :   }
    3357       61478 : }
    3358             : 
    3359         625 : void ReadOnlySpace::ClearStringPaddingIfNeeded() {
    3360         802 :   if (is_string_padding_cleared_) return;
    3361             : 
    3362         448 :   WritableScope writable_scope(this);
    3363         896 :   for (Page* page : *this) {
    3364             :     HeapObjectIterator iterator(page);
    3365      507304 :     for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
    3366      506856 :       if (o->IsSeqOneByteString()) {
    3367      227584 :         SeqOneByteString::cast(o)->clear_padding();
    3368      279272 :       } else if (o->IsSeqTwoByteString()) {
    3369           0 :         SeqTwoByteString::cast(o)->clear_padding();
    3370             :       }
    3371             :     }
    3372             :   }
    3373         448 :   is_string_padding_cleared_ = true;
    3374             : }
    3375             : 
    3376       61534 : void ReadOnlySpace::MarkAsReadOnly() {
    3377             :   DCHECK(!is_marked_read_only_);
    3378       61982 :   FreeLinearAllocationArea();
    3379       61982 :   is_marked_read_only_ = true;
    3380       61982 :   SetPermissionsForPages(PageAllocator::kRead);
    3381       61534 : }
    3382             : 
    3383       61518 : void ReadOnlySpace::MarkAsReadWrite() {
    3384             :   DCHECK(is_marked_read_only_);
    3385       61966 :   SetPermissionsForPages(PageAllocator::kReadWrite);
    3386       61967 :   is_marked_read_only_ = false;
    3387       61519 : }
    3388             : 
    3389       58227 : Address LargePage::GetAddressToShrink(Address object_address,
    3390             :                                       size_t object_size) {
    3391       58227 :   if (executable() == EXECUTABLE) {
    3392             :     return 0;
    3393             :   }
    3394       34798 :   size_t used_size = ::RoundUp((object_address - address()) + object_size,
    3395             :                                MemoryAllocator::GetCommitPageSize());
    3396       17399 :   if (used_size < CommittedPhysicalMemory()) {
    3397          88 :     return address() + used_size;
    3398             :   }
    3399             :   return 0;
    3400             : }
    3401             : 
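// --- Illustrative sketch (not part of spaces.cc, so no coverage data) ------
// GetAddressToShrink() above decides how much of a large page can be released
// after an object was right-trimmed: the surviving object is rounded up to
// the commit-page granularity, and everything beyond that boundary is
// returnable to the OS. The helper below repeats that arithmetic with
// invented names; kCommitPageSize is an assumed, platform-dependent stand-in
// for MemoryAllocator::GetCommitPageSize().
#include <cstdint>

namespace sketch {

constexpr uintptr_t kCommitPageSize = 4096;  // assumption; must be a power of two

inline uintptr_t RoundUp(uintptr_t value, uintptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Returns the address the page can be shrunk down to, or 0 when nothing past
// the used region could be freed.
inline uintptr_t AddressToShrink(uintptr_t page_start, uintptr_t committed_size,
                                 uintptr_t object_start, uintptr_t object_size) {
  uintptr_t used =
      RoundUp((object_start - page_start) + object_size, kCommitPageSize);
  return used < committed_size ? page_start + used : 0;
}

}  // namespace sketch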
    3402          88 : void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
    3403          88 :   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
    3404          88 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    3405             :   RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
    3406          88 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    3407          88 :   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
    3408          88 :   RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
    3409          88 : }
    3410             : 
    3411             : // -----------------------------------------------------------------------------
    3412             : // LargeObjectIterator
    3413             : 
    3414       23460 : LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
    3415       23460 :   current_ = space->first_page();
    3416           0 : }
    3417             : 
    3418       26892 : HeapObject LargeObjectIterator::Next() {
    3419      233029 :   if (current_ == nullptr) return HeapObject();
    3420             : 
    3421             :   HeapObject object = current_->GetObject();
    3422        3432 :   current_ = current_->next_page();
    3423        3432 :   return object;
    3424             : }
    3425             : 
    3426             : // -----------------------------------------------------------------------------
    3427             : // LargeObjectSpace
    3428             : 
    3429       61534 : LargeObjectSpace::LargeObjectSpace(Heap* heap)
    3430       61534 :     : LargeObjectSpace(heap, LO_SPACE) {}
    3431             : 
    3432           0 : LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    3433      184602 :     : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}
    3434             : 
    3435      184554 : void LargeObjectSpace::TearDown() {
    3436      306418 :   while (!memory_chunk_list_.Empty()) {
    3437             :     LargePage* page = first_page();
    3438       60932 :     LOG(heap()->isolate(),
    3439             :         DeleteEvent("LargeObjectChunk",
    3440             :                     reinterpret_cast<void*>(page->address())));
    3441             :     memory_chunk_list_.Remove(page);
    3442       60931 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
    3443             :   }
    3444      184554 : }
    3445             : 
    3446       17266 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
    3447       17266 :   return AllocateRaw(object_size, NOT_EXECUTABLE);
    3448             : }
    3449             : 
    3450       57118 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
    3451             :                                                Executability executable) {
    3452             :   // Check if we want to force a GC before growing the old space further.
    3453             :   // If so, fail the allocation.
    3454      114221 :   if (!heap()->CanExpandOldGeneration(object_size) ||
    3455       57103 :       !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    3456             :     return AllocationResult::Retry(identity());
    3457             :   }
    3458             : 
    3459       57085 :   LargePage* page = AllocateLargePage(object_size, executable);
    3460       57085 :   if (page == nullptr) return AllocationResult::Retry(identity());
    3461             :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3462             :   HeapObject object = page->GetObject();
    3463             :   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    3464             :       heap()->GCFlagsForIncrementalMarking(),
    3465       57085 :       kGCCallbackScheduleIdleGarbageCollection);
    3466       57085 :   if (heap()->incremental_marking()->black_allocation()) {
    3467             :     heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
    3468             :   }
    3469             :   DCHECK_IMPLIES(
    3470             :       heap()->incremental_marking()->black_allocation(),
    3471             :       heap()->incremental_marking()->marking_state()->IsBlack(object));
    3472             :   page->InitializationMemoryFence();
    3473       57085 :   heap()->NotifyOldGenerationExpansion();
    3474       57084 :   AllocationStep(object_size, object->address(), object_size);
    3475       57084 :   return object;
    3476             : }
    3477             : 
    3478       70445 : LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
    3479             :                                                Executability executable) {
    3480       70445 :   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
    3481       70445 :       object_size, this, executable);
    3482       70444 :   if (page == nullptr) return nullptr;
    3483             :   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
    3484             : 
    3485       70444 :   AddPage(page, object_size);
    3486             : 
    3487             :   HeapObject object = page->GetObject();
    3488             : 
    3489             :   heap()->CreateFillerObjectAt(object->address(), object_size,
    3490       70445 :                                ClearRecordedSlots::kNo);
    3491       70445 :   return page;
    3492             : }
    3493             : 
    3494             : 
    3495         753 : size_t LargeObjectSpace::CommittedPhysicalMemory() {
    3496             :   // On a platform that provides lazy committing of memory, we over-account
    3497             :   // the actually committed memory. There is no easy way right now to support
    3498             :   // precise accounting of committed memory in large object space.
    3499         753 :   return CommittedMemory();
    3500             : }
    3501             : 
    3502      537538 : LargePage* CodeLargeObjectSpace::FindPage(Address a) {
    3503      537538 :   const Address key = MemoryChunk::FromAddress(a)->address();
    3504             :   auto it = chunk_map_.find(key);
    3505      537538 :   if (it != chunk_map_.end()) {
    3506        5444 :     LargePage* page = it->second;
    3507        5444 :     CHECK(page->Contains(a));
    3508             :     return page;
    3509             :   }
    3510             :   return nullptr;
    3511             : }
    3512             : 
    3513      147910 : void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
    3514             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    3515             :       heap()->incremental_marking()->non_atomic_marking_state();
    3516             :   LargeObjectIterator it(this);
    3517      206137 :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    3518       58227 :     if (marking_state->IsBlackOrGrey(obj)) {
    3519             :       Marking::MarkWhite(marking_state->MarkBitFrom(obj));
    3520             :       MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    3521       58227 :       RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
    3522             :       chunk->ResetProgressBar();
    3523             :       marking_state->SetLiveBytes(chunk, 0);
    3524             :     }
    3525             :     DCHECK(marking_state->IsWhite(obj));
    3526             :   }
    3527      147910 : }
    3528             : 
    3529       39848 : void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
    3530      159904 :   for (Address current = reinterpret_cast<Address>(page);
    3531       79952 :        current < reinterpret_cast<Address>(page) + page->size();
    3532             :        current += MemoryChunk::kPageSize) {
    3533       40104 :     chunk_map_[current] = page;
    3534             :   }
    3535       39849 : }
    3536             : 
    3537          22 : void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
    3538         460 :   for (Address current = page->address();
    3539         219 :        current < reinterpret_cast<Address>(page) + page->size();
    3540             :        current += MemoryChunk::kPageSize) {
    3541             :     chunk_map_.erase(current);
    3542             :   }
    3543          22 : }
    3544             : 
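// --- Illustrative sketch (not part of spaces.cc, so no coverage data) ------
// FindPage() and the chunk-map maintenance above give CodeLargeObjectSpace an
// O(1) lookup from an arbitrary code address to its large page: every
// page-aligned slot covered by a large page is keyed to that page, and a
// query only needs to round its address down to the slot boundary. The sketch
// below shows the same idea with invented names; kAlignment and LargePageStub
// are placeholders, and the alignment value is an assumption rather than
// MemoryChunk::kPageSize.
#include <cstdint>
#include <unordered_map>

namespace sketch {

constexpr uintptr_t kAlignment = 256 * 1024;  // assumed chunk alignment

struct LargePageStub {
  uintptr_t start;  // assumed to be kAlignment-aligned
  uintptr_t size;
  bool Contains(uintptr_t addr) const {
    return addr >= start && addr < start + size;
  }
};

class ChunkMap {
 public:
  void Insert(LargePageStub* page) {
    // One entry per aligned slot the page spans, mirroring
    // InsertChunkMapEntries() above.
    for (uintptr_t cur = page->start; cur < page->start + page->size;
         cur += kAlignment) {
      map_[cur] = page;
    }
  }

  void Remove(LargePageStub* page) {
    for (uintptr_t cur = page->start; cur < page->start + page->size;
         cur += kAlignment) {
      map_.erase(cur);
    }
  }

  LargePageStub* Find(uintptr_t addr) const {
    uintptr_t key = addr & ~(kAlignment - 1);  // round down to the slot start
    auto it = map_.find(key);
    if (it == map_.end() || !it->second->Contains(addr)) return nullptr;
    return it->second;
  }

 private:
  std::unordered_map<uintptr_t, LargePageStub*> map_;
};

}  // namespace sketch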
    3545        4487 : void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
    3546             :   DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
    3547             :   DCHECK(page->IsLargePage());
    3548             :   DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
    3549             :   DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
    3550        4487 :   size_t object_size = static_cast<size_t>(page->GetObject()->Size());
    3551        4487 :   static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
    3552        4487 :   AddPage(page, object_size);
    3553             :   page->ClearFlag(MemoryChunk::FROM_PAGE);
    3554             :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3555        4487 :   page->set_owner(this);
    3556        4487 : }
    3557             : 
    3558       74931 : void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
    3559       74931 :   size_ += static_cast<int>(page->size());
    3560             :   AccountCommitted(page->size());
    3561       74931 :   objects_size_ += object_size;
    3562       74931 :   page_count_++;
    3563             :   memory_chunk_list_.PushBack(page);
    3564       74931 : }
    3565             : 
    3566       14000 : void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
    3567       14000 :   size_ -= static_cast<int>(page->size());
    3568             :   AccountUncommitted(page->size());
    3569       14000 :   objects_size_ -= object_size;
    3570       14000 :   page_count_--;
    3571             :   memory_chunk_list_.Remove(page);
    3572       14000 : }
    3573             : 
    3574      221865 : void LargeObjectSpace::FreeUnmarkedObjects() {
    3575             :   LargePage* current = first_page();
    3576             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    3577             :       heap()->incremental_marking()->non_atomic_marking_state();
    3578             :   // Right-trimming does not update the objects_size_ counter. We are lazily
    3579             :   // updating it after every GC.
    3580             :   size_t surviving_object_size = 0;
    3581      342985 :   while (current) {
    3582             :     LargePage* next_current = current->next_page();
    3583       60560 :     HeapObject object = current->GetObject();
    3584             :     DCHECK(!marking_state->IsGrey(object));
    3585       60560 :     size_t size = static_cast<size_t>(object->Size());
    3586       60560 :     if (marking_state->IsBlack(object)) {
    3587             :       Address free_start;
    3588       58227 :       surviving_object_size += size;
    3589       58227 :       if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
    3590             :           0) {
    3591             :         DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
    3592          88 :         current->ClearOutOfLiveRangeSlots(free_start);
    3593             :         const size_t bytes_to_free =
    3594         176 :             current->size() - (free_start - current->address());
    3595          88 :         heap()->memory_allocator()->PartialFreeMemory(
    3596             :             current, free_start, bytes_to_free,
    3597         176 :             current->area_start() + object->Size());
    3598          88 :         size_ -= bytes_to_free;
    3599             :         AccountUncommitted(bytes_to_free);
    3600             :       }
    3601             :     } else {
    3602        2333 :       RemovePage(current, size);
    3603             :       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
    3604        2333 :           current);
    3605             :     }
    3606             :     current = next_current;
    3607             :   }
    3608      221865 :   objects_size_ = surviving_object_size;
    3609      221865 : }
    3610             : 
    3611          90 : bool LargeObjectSpace::Contains(HeapObject object) {
    3612             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    3613             : 
    3614          90 :   bool owned = (chunk->owner() == this);
    3615             : 
    3616             :   SLOW_DCHECK(!owned || ContainsSlow(object->address()));
    3617             : 
    3618          90 :   return owned;
    3619             : }
    3620             : 
    3621           0 : bool LargeObjectSpace::ContainsSlow(Address addr) {
    3622           0 :   for (LargePage* page : *this) {
    3623           0 :     if (page->Contains(addr)) return true;
    3624             :   }
    3625             :   return false;
    3626             : }
    3627             : 
    3628       23460 : std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
    3629       23460 :   return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
    3630             : }
    3631             : 
    3632             : #ifdef VERIFY_HEAP
    3633             : // We do not assume that the large object iterator works, because it depends
    3634             : // on the invariants we are checking during verification.
    3635             : void LargeObjectSpace::Verify(Isolate* isolate) {
    3636             :   size_t external_backing_store_bytes[kNumTypes];
    3637             : 
    3638             :   for (int i = 0; i < kNumTypes; i++) {
    3639             :     external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    3640             :   }
    3641             : 
    3642             :   for (LargePage* chunk = first_page(); chunk != nullptr;
    3643             :        chunk = chunk->next_page()) {
    3644             :     // Each chunk contains an object that starts at the large object page's
    3645             :     // object area start.
    3646             :     HeapObject object = chunk->GetObject();
    3647             :     Page* page = Page::FromHeapObject(object);
    3648             :     CHECK(object->address() == page->area_start());
    3649             : 
    3650             :     // The first word should be a map, and we expect all map pointers to be
    3651             :     // in map space or read-only space.
    3652             :     Map map = object->map();
    3653             :     CHECK(map->IsMap());
    3654             :     CHECK(heap()->map_space()->Contains(map) ||
    3655             :           heap()->read_only_space()->Contains(map));
    3656             : 
    3657             :     // We have only the following types in the large object space:
    3658             :     if (!(object->IsAbstractCode() || object->IsSeqString() ||
    3659             :           object->IsExternalString() || object->IsThinString() ||
    3660             :           object->IsFixedArray() || object->IsFixedDoubleArray() ||
    3661             :           object->IsWeakFixedArray() || object->IsWeakArrayList() ||
    3662             :           object->IsPropertyArray() || object->IsByteArray() ||
    3663             :           object->IsFeedbackVector() || object->IsBigInt() ||
    3664             :           object->IsFreeSpace() || object->IsFeedbackMetadata() ||
    3665             :           object->IsContext() ||
    3666             :           object->IsUncompiledDataWithoutPreparseData() ||
    3667             :           object->IsPreparseData()) &&
    3668             :         !FLAG_young_generation_large_objects) {
    3669             :       FATAL("Found invalid Object (instance_type=%i) in large object space.",
    3670             :             object->map()->instance_type());
    3671             :     }
    3672             : 
    3673             :     // The object itself should look OK.
    3674             :     object->ObjectVerify(isolate);
    3675             : 
    3676             :     if (!FLAG_verify_heap_skip_remembered_set) {
    3677             :       heap()->VerifyRememberedSetFor(object);
    3678             :     }
    3679             : 
    3680             :     // Byte arrays and strings don't have interior pointers.
    3681             :     if (object->IsAbstractCode()) {
    3682             :       VerifyPointersVisitor code_visitor(heap());
    3683             :       object->IterateBody(map, object->Size(), &code_visitor);
    3684             :     } else if (object->IsFixedArray()) {
    3685             :       FixedArray array = FixedArray::cast(object);
    3686             :       for (int j = 0; j < array->length(); j++) {
    3687             :         Object element = array->get(j);
    3688             :         if (element->IsHeapObject()) {
    3689             :           HeapObject element_object = HeapObject::cast(element);
    3690             :           CHECK(heap()->Contains(element_object));
    3691             :           CHECK(element_object->map()->IsMap());
    3692             :         }
    3693             :       }
    3694             :     } else if (object->IsPropertyArray()) {
    3695             :       PropertyArray array = PropertyArray::cast(object);
    3696             :       for (int j = 0; j < array->length(); j++) {
    3697             :         Object property = array->get(j);
    3698             :         if (property->IsHeapObject()) {
    3699             :           HeapObject property_object = HeapObject::cast(property);
    3700             :           CHECK(heap()->Contains(property_object));
    3701             :           CHECK(property_object->map()->IsMap());
    3702             :         }
    3703             :       }
    3704             :     }
    3705             :     for (int i = 0; i < kNumTypes; i++) {
    3706             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    3707             :       external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    3708             :     }
    3709             :   }
    3710             :   for (int i = 0; i < kNumTypes; i++) {
    3711             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    3712             :     CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
    3713             :   }
    3714             : }
    3715             : #endif
    3716             : 
    3717             : #ifdef DEBUG
    3718             : void LargeObjectSpace::Print() {
    3719             :   StdoutStream os;
    3720             :   LargeObjectIterator it(this);
    3721             :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    3722             :     obj->Print(os);
    3723             :   }
    3724             : }
    3725             : 
    3726             : void Page::Print() {
    3727             :   // Make a best-effort to print the objects in the page.
    3728             :   PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
    3729             :          this->owner()->name());
    3730             :   printf(" --------------------------------------\n");
    3731             :   HeapObjectIterator objects(this);
    3732             :   unsigned mark_size = 0;
    3733             :   for (HeapObject object = objects.Next(); !object.is_null();
    3734             :        object = objects.Next()) {
    3735             :     bool is_marked =
    3736             :         heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
    3737             :     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    3738             :     if (is_marked) {
    3739             :       mark_size += object->Size();
    3740             :     }
    3741             :     object->ShortPrint();
    3742             :     PrintF("\n");
    3743             :   }
    3744             :   printf(" --------------------------------------\n");
    3745             :   printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
    3746             :          heap()->incremental_marking()->marking_state()->live_bytes(this));
    3747             : }
    3748             : 
    3749             : #endif  // DEBUG
    3750             : 
    3751       61534 : NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
    3752             :     : LargeObjectSpace(heap, NEW_LO_SPACE),
    3753             :       pending_object_(0),
    3754      123068 :       capacity_(capacity) {}
    3755             : 
    3756       15777 : AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
    3757             :   // Do not allocate more objects if promoting the existing object would exceed
    3758             :   // the old generation capacity.
    3759       15777 :   if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
    3760             :     return AllocationResult::Retry(identity());
    3761             :   }
    3762             : 
    3763             :   // Allocation for the first object must succeed independently of the capacity.
    3764       15769 :   if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
    3765             :     return AllocationResult::Retry(identity());
    3766             :   }
    3767             : 
    3768       13360 :   LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
    3769       13360 :   if (page == nullptr) return AllocationResult::Retry(identity());
    3770             : 
    3771             :   // The size of the first object may exceed the capacity.
    3772       26720 :   capacity_ = Max(capacity_, SizeOfObjects());
    3773             : 
    3774             :   HeapObject result = page->GetObject();
    3775             :   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3776             :   page->SetFlag(MemoryChunk::TO_PAGE);
    3777             :   pending_object_.store(result->address(), std::memory_order_relaxed);
    3778             : #ifdef ENABLE_MINOR_MC
    3779       13360 :   if (FLAG_minor_mc) {
    3780             :     page->AllocateYoungGenerationBitmap();
    3781             :     heap()
    3782             :         ->minor_mark_compact_collector()
    3783             :         ->non_atomic_marking_state()
    3784           0 :         ->ClearLiveness(page);
    3785             :   }
    3786             : #endif  // ENABLE_MINOR_MC
    3787             :   page->InitializationMemoryFence();
    3788             :   DCHECK(page->IsLargePage());
    3789             :   DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
    3790       13360 :   AllocationStep(object_size, result->address(), object_size);
    3791       13360 :   return result;
    3792             : }
    3793             : 
    3794       11670 : size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
    3795             : 
    3796       94928 : void NewLargeObjectSpace::Flip() {
    3797      119530 :   for (LargePage* chunk = first_page(); chunk != nullptr;
    3798             :        chunk = chunk->next_page()) {
    3799             :     chunk->SetFlag(MemoryChunk::FROM_PAGE);
    3800             :     chunk->ClearFlag(MemoryChunk::TO_PAGE);
    3801             :   }
    3802       94928 : }
    3803             : 
    3804       20973 : void NewLargeObjectSpace::FreeDeadObjects(
    3805             :     const std::function<bool(HeapObject)>& is_dead) {
    3806             :   bool is_marking = heap()->incremental_marking()->IsMarking();
    3807             :   size_t surviving_object_size = 0;
    3808             :   bool freed_pages = false;
    3809       35333 :   for (auto it = begin(); it != end();) {
    3810             :     LargePage* page = *it;
    3811             :     it++;
    3812        7180 :     HeapObject object = page->GetObject();
    3813        7180 :     size_t size = static_cast<size_t>(object->Size());
    3814        7180 :     if (is_dead(object)) {
    3815             :       freed_pages = true;
    3816        7180 :       RemovePage(page, size);
    3817        7180 :       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    3818        7180 :       if (FLAG_concurrent_marking && is_marking) {
    3819         791 :         heap()->concurrent_marking()->ClearMemoryChunkData(page);
    3820             :       }
    3821             :     } else {
    3822           0 :       surviving_object_size += size;
    3823             :     }
    3824             :   }
    3825             :   // Right-trimming does not update the objects_size_ counter. We are lazily
    3826             :   // updating it after every GC.
    3827       20973 :   objects_size_ = surviving_object_size;
    3828       20973 :   if (freed_pages) {
    3829        1793 :     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    3830             :   }
    3831       20973 : }
    3832             : 
    3833      121211 : void NewLargeObjectSpace::SetCapacity(size_t capacity) {
    3834      242422 :   capacity_ = Max(capacity, SizeOfObjects());
    3835      121211 : }
    3836             : 
    3837       61534 : CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    3838             :     : LargeObjectSpace(heap, CODE_LO_SPACE),
    3839      123068 :       chunk_map_(kInitialChunkMapCapacity) {}
    3840             : 
    3841       39852 : AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
    3842       39852 :   return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
    3843             : }
    3844             : 
    3845       39848 : void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
    3846       39848 :   LargeObjectSpace::AddPage(page, object_size);
    3847       39848 :   InsertChunkMapEntries(page);
    3848       39849 : }
    3849             : 
    3850          22 : void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
    3851          22 :   RemoveChunkMapEntries(page);
    3852          22 :   LargeObjectSpace::RemovePage(page, object_size);
    3853          22 : }
    3854             : 
    3855             : }  // namespace internal
    3856      120216 : }  // namespace v8

Generated by: LCOV version 1.10