LCOV - code coverage report
Current view: top level - src/heap - spaces.cc (source / functions)

                                 Hit    Total   Coverage
Test: app.info       Lines:     1186     1338     88.6 %
Date: 2019-04-19     Functions:  209      250     83.6 %

          Line data    Source code
       1             : // Copyright 2011 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/heap/spaces.h"
       6             : 
       7             : #include <utility>
       8             : 
       9             : #include "src/base/bits.h"
      10             : #include "src/base/macros.h"
      11             : #include "src/base/platform/semaphore.h"
      12             : #include "src/base/template-utils.h"
      13             : #include "src/counters.h"
      14             : #include "src/heap/array-buffer-tracker.h"
      15             : #include "src/heap/concurrent-marking.h"
      16             : #include "src/heap/gc-tracer.h"
      17             : #include "src/heap/heap-controller.h"
      18             : #include "src/heap/incremental-marking-inl.h"
      19             : #include "src/heap/mark-compact.h"
      20             : #include "src/heap/remembered-set.h"
      21             : #include "src/heap/slot-set.h"
      22             : #include "src/heap/sweeper.h"
      23             : #include "src/msan.h"
      24             : #include "src/objects-inl.h"
      25             : #include "src/objects/free-space-inl.h"
      26             : #include "src/objects/js-array-buffer-inl.h"
      27             : #include "src/objects/js-array-inl.h"
      28             : #include "src/ostreams.h"
      29             : #include "src/snapshot/snapshot.h"
      30             : #include "src/v8.h"
      31             : #include "src/vm-state-inl.h"
      32             : 
      33             : namespace v8 {
      34             : namespace internal {
      35             : 
       36             : // These checks ensure that the lower 32 bits of any real heap object can't
       37             : // overlap with the lower 32 bits of the cleared weak reference value. It is
       38             : // therefore enough to compare only the lower 32 bits of a MaybeObject to
       39             : // determine whether it is a cleared weak reference.
      40             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
      41             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
      42             : STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
      43             : 
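For illustration, the invariant asserted above is what makes a "compare only
the low half" check valid. A minimal standalone sketch, with an assumed
placeholder value for kClearedWeakHeapObjectLower32 (the real constant comes
from V8's heap layout):

    #include <cstdint>

    // Assumed placeholder value; asserted above only to be nonzero and
    // smaller than the page header size.
    constexpr uint32_t kClearedWeakHeapObjectLower32 = 3;

    // Because no real heap object can share these low 32 bits, comparing
    // only the low half of a MaybeObject is sufficient.
    bool IsClearedWeakReference(uint64_t maybe_object) {
      return static_cast<uint32_t>(maybe_object) ==
             kClearedWeakHeapObjectLower32;
    }
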
      44             : // ----------------------------------------------------------------------------
      45             : // HeapObjectIterator
      46             : 
      47           6 : HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
      48             :     : cur_addr_(kNullAddress),
      49             :       cur_end_(kNullAddress),
      50             :       space_(space),
      51             :       page_range_(space->first_page(), nullptr),
      52       31512 :       current_page_(page_range_.begin()) {}
      53             : 
      54           0 : HeapObjectIterator::HeapObjectIterator(Page* page)
      55             :     : cur_addr_(kNullAddress),
      56             :       cur_end_(kNullAddress),
      57             :       space_(reinterpret_cast<PagedSpace*>(page->owner())),
      58             :       page_range_(page),
      59         896 :       current_page_(page_range_.begin()) {
      60             : #ifdef DEBUG
      61             :   Space* owner = page->owner();
      62             :   DCHECK(owner == page->heap()->old_space() ||
      63             :          owner == page->heap()->map_space() ||
      64             :          owner == page->heap()->code_space() ||
      65             :          owner == page->heap()->read_only_space());
      66             : #endif  // DEBUG
      67           0 : }
      68             : 
       69             : // We have hit the end of the current page's object area and should advance
       70             : // to the next page in the iteration range.
      71       89383 : bool HeapObjectIterator::AdvanceToNextPage() {
      72             :   DCHECK_EQ(cur_addr_, cur_end_);
      73       89383 :   if (current_page_ == page_range_.end()) return false;
      74             :   Page* cur_page = *(current_page_++);
      75       57431 :   Heap* heap = space_->heap();
      76             : 
      77       57431 :   heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
      78             : #ifdef ENABLE_MINOR_MC
      79       57431 :   if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
      80             :     heap->minor_mark_compact_collector()->MakeIterable(
      81             :         cur_page, MarkingTreatmentMode::CLEAR,
      82           0 :         FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
      83             : #else
      84             :   DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
      85             : #endif  // ENABLE_MINOR_MC
      86       57431 :   cur_addr_ = cur_page->area_start();
      87       57431 :   cur_end_ = cur_page->area_end();
      88             :   DCHECK(cur_page->SweepingDone());
      89       57431 :   return true;
      90             : }
      91             : 
      92       96984 : PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
      93       96984 :     : heap_(heap) {
      94             :   DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
      95             : 
      96      872856 :   for (SpaceIterator it(heap_); it.has_next();) {
      97      775872 :     it.next()->PauseAllocationObservers();
      98             :   }
      99       96984 : }
     100             : 
     101      193968 : PauseAllocationObserversScope::~PauseAllocationObserversScope() {
     102      872856 :   for (SpaceIterator it(heap_); it.has_next();) {
     103      775872 :     it.next()->ResumeAllocationObservers();
     104             :   }
     105       96984 : }
     106             : 
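PauseAllocationObserversScope is a plain RAII pair: construction pauses the
observers of every space and destruction resumes them, so observers stay
paused even on early returns. A self-contained sketch of the same shape, with
a hypothetical Space type standing in for v8::internal::Space:

    #include <vector>

    // Hypothetical stand-in for a heap space.
    struct Space {
      void PauseAllocationObservers() { paused = true; }
      void ResumeAllocationObservers() { paused = false; }
      bool paused = false;
    };

    class PauseScopeSketch {
     public:
      explicit PauseScopeSketch(std::vector<Space>& spaces) : spaces_(spaces) {
        for (Space& s : spaces_) s.PauseAllocationObservers();
      }
      ~PauseScopeSketch() {
        for (Space& s : spaces_) s.ResumeAllocationObservers();
      }

     private:
      std::vector<Space>& spaces_;
    };
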
     107             : static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
     108             :     LAZY_INSTANCE_INITIALIZER;
     109             : 
     110       63463 : Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
     111       63463 :   base::MutexGuard guard(&mutex_);
     112             :   auto it = recently_freed_.find(code_range_size);
     113       63463 :   if (it == recently_freed_.end() || it->second.empty()) {
     114       61305 :     return reinterpret_cast<Address>(GetRandomMmapAddr());
     115             :   }
     116        2158 :   Address result = it->second.back();
     117             :   it->second.pop_back();
     118        2158 :   return result;
     119             : }
     120             : 
     121       63445 : void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
     122             :                                                 size_t code_range_size) {
     123       63445 :   base::MutexGuard guard(&mutex_);
     124       63445 :   recently_freed_[code_range_size].push_back(code_range_start);
     125       63445 : }
     126             : 
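The two methods above form a small free-list keyed by range size: freed code
ranges are remembered per size, and a later request for the same size gets an
old start address back as a mmap hint, which helps repeated isolate creation
reuse the same regions instead of fragmenting the address space. A simplified
model (the mutex and the random-address fallback of the real code are elided;
Address is assumed to be a plain integer type):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    using Address = uintptr_t;

    class CodeRangeHintSketch {
     public:
      // Returns a previously freed start address for this size, if any;
      // otherwise the caller falls back to a random mmap address.
      Address GetAddressHint(size_t size, Address random_fallback) {
        auto it = recently_freed_.find(size);
        if (it == recently_freed_.end() || it->second.empty()) {
          return random_fallback;
        }
        Address result = it->second.back();
        it->second.pop_back();
        return result;
      }

      void NotifyFreedCodeRange(Address start, size_t size) {
        recently_freed_[size].push_back(start);
      }

     private:
      std::unordered_map<size_t, std::vector<Address>> recently_freed_;
    };
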
     127             : // -----------------------------------------------------------------------------
     128             : // MemoryAllocator
     129             : //
     130             : 
     131       63457 : MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
     132             :                                  size_t code_range_size)
     133             :     : isolate_(isolate),
     134       63457 :       data_page_allocator_(isolate->page_allocator()),
     135             :       code_page_allocator_(nullptr),
     136             :       capacity_(RoundUp(capacity, Page::kPageSize)),
     137             :       size_(0),
     138             :       size_executable_(0),
     139             :       lowest_ever_allocated_(static_cast<Address>(-1ll)),
     140             :       highest_ever_allocated_(kNullAddress),
     141      253828 :       unmapper_(isolate->heap(), this) {
     142       63457 :   InitializeCodePageAllocator(data_page_allocator_, code_range_size);
     143       63457 : }
     144             : 
     145       63457 : void MemoryAllocator::InitializeCodePageAllocator(
     146             :     v8::PageAllocator* page_allocator, size_t requested) {
     147             :   DCHECK_NULL(code_page_allocator_instance_.get());
     148             : 
     149       63457 :   code_page_allocator_ = page_allocator;
     150             : 
     151       63457 :   if (requested == 0) {
     152             :     if (!kRequiresCodeRange) return;
     153             :     // When a target requires the code range feature, we put all code objects
     154             :     // in a kMaximalCodeRangeSize range of virtual address space, so that
     155             :     // they can call each other with near calls.
     156             :     requested = kMaximalCodeRangeSize;
     157           1 :   } else if (requested <= kMinimumCodeRangeSize) {
     158             :     requested = kMinimumCodeRangeSize;
     159             :   }
     160             : 
     161             :   const size_t reserved_area =
     162             :       kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
     163             :   if (requested < (kMaximalCodeRangeSize - reserved_area)) {
     164             :     requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
      165             :     // Fulfilling both the reserved-pages requirement and huge code-area
      166             :     // alignment is not supported (it would require a re-implementation).
     167             :     DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
     168             :   }
     169             :   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
     170             : 
     171             :   Address hint =
     172      126914 :       RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
     173       63457 :                 page_allocator->AllocatePageSize());
     174             :   VirtualMemory reservation(
     175             :       page_allocator, requested, reinterpret_cast<void*>(hint),
     176      190371 :       Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
     177       63457 :   if (!reservation.IsReserved()) {
     178           0 :     V8::FatalProcessOutOfMemory(isolate_,
     179           0 :                                 "CodeRange setup: allocate virtual memory");
     180             :   }
     181       63457 :   code_range_ = reservation.region();
     182             : 
      183             :   // We are sure that we have mapped a block of at least the requested size.
     184             :   DCHECK_GE(reservation.size(), requested);
     185             :   Address base = reservation.address();
     186             : 
     187             :   // On some platforms, specifically Win64, we need to reserve some pages at
     188             :   // the beginning of an executable space. See
     189             :   //   https://cs.chromium.org/chromium/src/components/crash/content/
     190             :   //     app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
     191             :   // for details.
     192             :   if (reserved_area > 0) {
     193             :     if (!reservation.SetPermissions(base, reserved_area,
     194             :                                     PageAllocator::kReadWrite))
     195             :       V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
     196             : 
     197             :     base += reserved_area;
     198             :   }
     199       63457 :   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
     200             :   size_t size =
     201       63457 :       RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
     202       63457 :                 MemoryChunk::kPageSize);
     203             :   DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
     204             : 
     205       63457 :   LOG(isolate_,
     206             :       NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
     207             :                requested));
     208             : 
     209       63457 :   heap_reservation_.TakeControl(&reservation);
     210      126914 :   code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
     211             :       page_allocator, aligned_base, size,
     212             :       static_cast<size_t>(MemoryChunk::kAlignment));
     213       63457 :   code_page_allocator_ = code_page_allocator_instance_.get();
     214             : }
     215             : 
     216       63442 : void MemoryAllocator::TearDown() {
     217       63442 :   unmapper()->TearDown();
     218             : 
     219             :   // Check that spaces were torn down before MemoryAllocator.
     220             :   DCHECK_EQ(size_, 0u);
     221             :   // TODO(gc) this will be true again when we fix FreeMemory.
     222             :   // DCHECK_EQ(0, size_executable_);
     223       63442 :   capacity_ = 0;
     224             : 
     225       63442 :   if (last_chunk_.IsReserved()) {
     226           0 :     last_chunk_.Free();
     227             :   }
     228             : 
     229       63442 :   if (code_page_allocator_instance_.get()) {
     230             :     DCHECK(!code_range_.is_empty());
     231             :     code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
     232       63442 :                                                             code_range_.size());
     233       63442 :     code_range_ = base::AddressRegion();
     234             :     code_page_allocator_instance_.reset();
     235             :   }
     236       63442 :   code_page_allocator_ = nullptr;
     237       63442 :   data_page_allocator_ = nullptr;
     238       63442 : }
     239             : 
     240      307669 : class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
     241             :  public:
     242             :   explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
     243             :       : CancelableTask(isolate),
     244             :         unmapper_(unmapper),
     245      307892 :         tracer_(isolate->heap()->tracer()) {}
     246             : 
     247             :  private:
     248      151091 :   void RunInternal() override {
     249      604377 :     TRACE_BACKGROUND_GC(tracer_,
     250             :                         GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
     251      151054 :     unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     252      151165 :     unmapper_->active_unmapping_tasks_--;
     253      151165 :     unmapper_->pending_unmapping_tasks_semaphore_.Signal();
     254      151201 :     if (FLAG_trace_unmapper) {
     255           0 :       PrintIsolate(unmapper_->heap_->isolate(),
     256           0 :                    "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
     257             :     }
     258      151210 :   }
     259             : 
     260             :   Unmapper* const unmapper_;
     261             :   GCTracer* const tracer_;
     262             :   DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
     263             : };
     264             : 
     265      233890 : void MemoryAllocator::Unmapper::FreeQueuedChunks() {
     266      233890 :   if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
     267      154957 :     if (!MakeRoomForNewTasks()) {
     268             :       // kMaxUnmapperTasks are already running. Avoid creating any more.
     269        1011 :       if (FLAG_trace_unmapper) {
     270           0 :         PrintIsolate(heap_->isolate(),
     271             :                      "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
     272           0 :                      kMaxUnmapperTasks);
     273             :       }
     274        1011 :       return;
     275             :     }
     276      307892 :     auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
     277      153946 :     if (FLAG_trace_unmapper) {
     278           0 :       PrintIsolate(heap_->isolate(),
     279             :                    "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
     280           0 :                    task->id());
     281             :     }
     282             :     DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
     283             :     DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
     284             :     DCHECK_GE(active_unmapping_tasks_, 0);
     285             :     active_unmapping_tasks_++;
     286      307892 :     task_ids_[pending_unmapping_tasks_++] = task->id();
     287      461838 :     V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
     288             :   } else {
     289       78933 :     PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     290             :   }
     291             : }
     292             : 
     293      152294 : void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
     294      460186 :   for (int i = 0; i < pending_unmapping_tasks_; i++) {
     295      307892 :     if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
     296             :         TryAbortResult::kTaskAborted) {
     297      151210 :       pending_unmapping_tasks_semaphore_.Wait();
     298             :     }
     299             :   }
     300      152294 :   pending_unmapping_tasks_ = 0;
     301             :   active_unmapping_tasks_ = 0;
     302             : 
     303      152294 :   if (FLAG_trace_unmapper) {
     304             :     PrintIsolate(
     305           0 :         heap_->isolate(),
     306           0 :         "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
     307             :   }
     308      152294 : }
     309             : 
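The abort-or-wait protocol above relies on one invariant: every task that
actually runs signals the semaphore exactly once at the end of RunInternal(),
so the coordinator waits once per task it failed to abort. A standalone
sketch of such a semaphore using standard threading primitives (the real code
uses V8's base::Semaphore):

    #include <condition_variable>
    #include <mutex>

    class SemaphoreSketch {
     public:
      // Called by each task when it finishes.
      void Signal() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          ++count_;
        }
        cv_.notify_one();
      }

      // Called by the coordinator once per task that could not be aborted.
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_ = 0;
    };
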
     310       94944 : void MemoryAllocator::Unmapper::PrepareForGC() {
     311             :   // Free non-regular chunks because they cannot be re-used.
     312       94944 :   PerformFreeMemoryOnQueuedNonRegularChunks();
     313       94944 : }
     314             : 
     315       66223 : void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
     316       66223 :   CancelAndWaitForPendingTasks();
     317       66223 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     318       66223 : }
     319             : 
     320      154957 : bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
     321             :   DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
     322             : 
     323      154957 :   if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
     324             :     // All previous unmapping tasks have been run to completion.
     325             :     // Finalize those tasks to make room for new ones.
     326       86071 :     CancelAndWaitForPendingTasks();
     327             :   }
     328      154957 :   return pending_unmapping_tasks_ != kMaxUnmapperTasks;
     329             : }
     330             : 
     331      454727 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
     332             :   MemoryChunk* chunk = nullptr;
     333      481867 :   while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
     334       13570 :     allocator_->PerformFreeMemory(chunk);
     335             :   }
     336      454750 : }
     337             : 
     338             : template <MemoryAllocator::Unmapper::FreeMode mode>
     339      359545 : void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     340             :   MemoryChunk* chunk = nullptr;
     341      359545 :   if (FLAG_trace_unmapper) {
     342           0 :     PrintIsolate(
     343           0 :         heap_->isolate(),
     344             :         "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
     345             :         NumberOfChunks());
     346             :   }
     347             :   // Regular chunks.
     348      781880 :   while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
     349             :     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
     350      422271 :     allocator_->PerformFreeMemory(chunk);
     351      422257 :     if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
     352             :   }
     353             :   if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
      354             :     // The previous loop uncommitted any pages marked as pooled and added
      355             :     // them to the pooled list. For kReleasePooled, those pooled pages must
      356             :     // also be freed back to the OS.
     357      376627 :     while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
     358      376627 :       allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
     359             :     }
     360             :   }
     361      359807 :   PerformFreeMemoryOnQueuedNonRegularChunks();
     362      359802 : }
     363             : 
     364       63444 : void MemoryAllocator::Unmapper::TearDown() {
     365       63444 :   CHECK_EQ(0, pending_unmapping_tasks_);
     366       63444 :   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
     367             :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     368             :     DCHECK(chunks_[i].empty());
     369             :   }
     370       63444 : }
     371             : 
     372           0 : size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
     373           0 :   base::MutexGuard guard(&mutex_);
     374           0 :   return chunks_[kRegular].size() + chunks_[kNonRegular].size();
     375             : }
     376             : 
     377           5 : int MemoryAllocator::Unmapper::NumberOfChunks() {
     378           5 :   base::MutexGuard guard(&mutex_);
     379             :   size_t result = 0;
     380          35 :   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     381          30 :     result += chunks_[i].size();
     382             :   }
     383          10 :   return static_cast<int>(result);
     384             : }
     385             : 
     386           0 : size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
     387           0 :   base::MutexGuard guard(&mutex_);
     388             : 
     389             :   size_t sum = 0;
      390             :   // kPooled chunks are already uncommitted. We only have to account for
      391             :   // kRegular and kNonRegular chunks.
     392           0 :   for (auto& chunk : chunks_[kRegular]) {
     393           0 :     sum += chunk->size();
     394             :   }
     395           0 :   for (auto& chunk : chunks_[kNonRegular]) {
     396           0 :     sum += chunk->size();
     397             :   }
     398           0 :   return sum;
     399             : }
     400             : 
     401       46641 : bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
     402             :   Address base = reservation->address();
     403             :   size_t size = reservation->size();
     404       46641 :   if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
     405             :     return false;
     406             :   }
     407       46641 :   UpdateAllocatedSpaceLimits(base, base + size);
     408       93282 :   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
     409       46641 :   return true;
     410             : }
     411             : 
     412      402723 : bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
     413             :   size_t size = reservation->size();
     414      402723 :   if (!reservation->SetPermissions(reservation->address(), size,
     415             :                                    PageAllocator::kNoAccess)) {
     416             :     return false;
     417             :   }
     418      805416 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
     419      402713 :   return true;
     420             : }
     421             : 
     422      439054 : void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
     423             :                                  Address base, size_t size) {
     424      439054 :   CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
     425      439054 : }
     426             : 
     427      918538 : Address MemoryAllocator::AllocateAlignedMemory(
     428             :     size_t reserve_size, size_t commit_size, size_t alignment,
     429             :     Executability executable, void* hint, VirtualMemory* controller) {
     430             :   v8::PageAllocator* page_allocator = this->page_allocator(executable);
     431             :   DCHECK(commit_size <= reserve_size);
     432     1837077 :   VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
     433      918539 :   if (!reservation.IsReserved()) return kNullAddress;
     434             :   Address base = reservation.address();
     435             :   size_ += reservation.size();
     436             : 
     437      918539 :   if (executable == EXECUTABLE) {
     438      131727 :     if (!CommitExecutableMemory(&reservation, base, commit_size,
     439             :                                 reserve_size)) {
     440             :       base = kNullAddress;
     441             :     }
     442             :   } else {
     443      786812 :     if (reservation.SetPermissions(base, commit_size,
     444             :                                    PageAllocator::kReadWrite)) {
     445      786812 :       UpdateAllocatedSpaceLimits(base, base + commit_size);
     446             :     } else {
     447             :       base = kNullAddress;
     448             :     }
     449             :   }
     450             : 
     451      918539 :   if (base == kNullAddress) {
     452             :     // Failed to commit the body. Free the mapping and any partially committed
     453             :     // regions inside it.
     454           0 :     reservation.Free();
     455             :     size_ -= reserve_size;
     456           0 :     return kNullAddress;
     457             :   }
     458             : 
     459      918539 :   controller->TakeControl(&reservation);
     460      918539 :   return base;
     461             : }
     462             : 
     463     2779837 : void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
     464             :   base::AddressRegion memory_area =
     465     2779837 :       MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
     466     2781530 :   if (memory_area.size() != 0) {
     467       27566 :     MemoryAllocator* memory_allocator = heap_->memory_allocator();
     468             :     v8::PageAllocator* page_allocator =
     469             :         memory_allocator->page_allocator(executable());
     470       27566 :     CHECK(page_allocator->DiscardSystemPages(
     471             :         reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
     472             :   }
     473     2781526 : }
     474             : 
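DiscardUnusedMemory can only return whole OS pages to the kernel, so the
discardable region is the span of commit pages that fits entirely inside
[addr, addr + size). A simplified sketch of that rounding (the real
ComputeDiscardMemoryArea lives elsewhere in this file and may apply further
constraints; the 4096-byte page size is an assumption, and the mask trick
requires it to be a power of two):

    #include <cstddef>
    #include <cstdint>

    using Address = uintptr_t;
    struct Region {
      Address begin;
      size_t size;
    };

    Region ComputeDiscardArea(Address addr, size_t size, size_t page = 4096) {
      Address begin = (addr + page - 1) & ~(page - 1);  // round start up
      Address end = (addr + size) & ~(page - 1);        // round end down
      if (begin >= end) return {0, 0};  // no whole page fits; nothing to do
      return {begin, end - begin};
    }
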
     475           0 : size_t MemoryChunkLayout::CodePageGuardStartOffset() {
     476             :   // We are guarding code pages: the first OS page after the header
     477             :   // will be protected as non-writable.
     478           0 :   return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
     479             : }
     480             : 
     481         500 : size_t MemoryChunkLayout::CodePageGuardSize() {
     482     7271452 :   return MemoryAllocator::GetCommitPageSize();
     483             : }
     484             : 
     485     7007506 : intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
     486             :   // We are guarding code pages: the first OS page after the header
     487             :   // will be protected as non-writable.
     488     7007498 :   return CodePageGuardStartOffset() + CodePageGuardSize();
     489             : }
     490             : 
     491           0 : intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
     492             :   // We are guarding code pages: the last OS page will be protected as
     493             :   // non-writable.
     494      587937 :   return Page::kPageSize -
     495      587937 :          static_cast<int>(MemoryAllocator::GetCommitPageSize());
     496             : }
     497             : 
     498      587937 : size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
     499      587937 :   size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
     500             :   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
     501      587937 :   return memory;
     502             : }
     503             : 
     504           5 : intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
     505           5 :   return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
     506             : }
     507             : 
     508        1000 : size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
     509             :     AllocationSpace space) {
     510       47641 :   if (space == CODE_SPACE) {
     511         500 :     return ObjectStartOffsetInCodePage();
     512             :   }
     513             :   return ObjectStartOffsetInDataPage();
     514             : }
     515             : 
     516     1958316 : size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
     517             :   size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
     518             :   DCHECK_LE(kMaxRegularHeapObjectSize, memory);
     519     1958316 :   return memory;
     520             : }
     521             : 
     522     1346820 : size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
     523             :     AllocationSpace space) {
     524     1851524 :   if (space == CODE_SPACE) {
     525      587937 :     return AllocatableMemoryInCodePage();
     526             :   }
     527             :   return AllocatableMemoryInDataPage();
     528             : }
     529             : 
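With concrete numbers the code-page layout above becomes easy to check:
assume a 4 KiB commit page, a 512 KiB V8 page, and a header smaller than one
commit page. A compile-time sketch; these constants are illustrative, not the
real V8 values:

    #include <cstddef>

    constexpr size_t kCommitPageSize = 4096;   // assumed OS commit page size
    constexpr size_t kPageSize = 512 * 1024;   // assumed V8 page size
    constexpr size_t kHeaderSize = 1208;       // assumed chunk header size

    constexpr size_t RoundUpTo(size_t x, size_t m) {
      return (x + m - 1) / m * m;
    }

    // The guard starts at the first OS page after the header; objects start
    // one guard page later. The last OS page is also a guard.
    constexpr size_t kGuardStart = RoundUpTo(kHeaderSize, kCommitPageSize);
    constexpr size_t kObjectStart = kGuardStart + kCommitPageSize;
    constexpr size_t kObjectEnd = kPageSize - kCommitPageSize;

    static_assert(kGuardStart == 4096, "header rounds up to one commit page");
    static_assert(kObjectStart == 8192, "objects start after the guard page");
    static_assert(kObjectEnd - kObjectStart == 512000,
                  "allocatable bytes in a code page under these assumptions");
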
     530           0 : Heap* MemoryChunk::synchronized_heap() {
     531             :   return reinterpret_cast<Heap*>(
     532           0 :       base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
     533             : }
     534             : 
     535           0 : void MemoryChunk::InitializationMemoryFence() {
     536             :   base::SeqCst_MemoryFence();
     537             : #ifdef THREAD_SANITIZER
     538             :   // Since TSAN does not process memory fences, we use the following annotation
      539             :   // to tell TSAN that there is no data race when emitting an
      540             :   // InitializationMemoryFence. Note that the other thread still needs to
     541             :   // perform MemoryChunk::synchronized_heap().
     542             :   base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
     543             :                       reinterpret_cast<base::AtomicWord>(heap_));
     544             : #endif
     545           0 : }
     546             : 
     547     3085974 : void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     548             :     PageAllocator::Permission permission) {
     549             :   DCHECK(permission == PageAllocator::kRead ||
     550             :          permission == PageAllocator::kReadExecute);
     551             :   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
     552             :   DCHECK(owner()->identity() == CODE_SPACE ||
     553             :          owner()->identity() == CODE_LO_SPACE);
      554             :   // Decrementing the write_unprotect_counter_ and changing the page
      555             :   // protection mode must happen atomically.
     556     3085974 :   base::MutexGuard guard(page_protection_change_mutex_);
     557     3085972 :   if (write_unprotect_counter_ == 0) {
     558             :     // This is a corner case that may happen when we have a
     559             :     // CodeSpaceMemoryModificationScope open and this page was newly
     560             :     // added.
     561             :     return;
     562             :   }
     563     3085972 :   write_unprotect_counter_--;
     564             :   DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
     565     3085972 :   if (write_unprotect_counter_ == 0) {
     566             :     Address protect_start =
     567     3043414 :         address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     568             :     size_t page_size = MemoryAllocator::GetCommitPageSize();
     569             :     DCHECK(IsAligned(protect_start, page_size));
     570             :     size_t protect_size = RoundUp(area_size(), page_size);
     571     3043413 :     CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
     572             :   }
     573             : }
     574             : 
     575       72452 : void MemoryChunk::SetReadable() {
     576       87878 :   DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
     577       72452 : }
     578             : 
     579     2577427 : void MemoryChunk::SetReadAndExecutable() {
     580             :   DCHECK(!FLAG_jitless);
     581             :   DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     582     2998096 :       PageAllocator::kReadExecute);
     583     2577427 : }
     584             : 
     585     3023027 : void MemoryChunk::SetReadAndWritable() {
     586             :   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
     587             :   DCHECK(owner()->identity() == CODE_SPACE ||
     588             :          owner()->identity() == CODE_LO_SPACE);
      589             :   // Incrementing the write_unprotect_counter_ and changing the page
      590             :   // protection mode must happen atomically.
     591     3023027 :   base::MutexGuard guard(page_protection_change_mutex_);
     592     3023030 :   write_unprotect_counter_++;
     593             :   DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
     594     3023030 :   if (write_unprotect_counter_ == 1) {
     595             :     Address unprotect_start =
     596     2980472 :         address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     597             :     size_t page_size = MemoryAllocator::GetCommitPageSize();
     598             :     DCHECK(IsAligned(unprotect_start, page_size));
     599             :     size_t unprotect_size = RoundUp(area_size(), page_size);
     600     2980469 :     CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
     601             :                                       PageAllocator::kReadWrite));
     602             :   }
     603     3023030 : }
     604             : 
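The counter protocol above is what lets code-space modification scopes nest:
the page becomes writable on the 0 -> 1 transition and is re-protected only
on the final 1 -> 0 transition. A reduced sketch with the permission changes
stubbed out (a hypothetical type, not V8 API):

    #include <mutex>

    class WriteProtectedChunkSketch {
     public:
      void SetReadAndWritable() {
        std::lock_guard<std::mutex> guard(mutex_);
        if (++write_unprotect_counter_ == 1) MakeWritable();  // 0 -> 1
      }

      void SetReadAndExecutable() {
        std::lock_guard<std::mutex> guard(mutex_);
        // Corner case from the code above: a modification scope was already
        // open when this page was added, so there is nothing to re-protect.
        if (write_unprotect_counter_ == 0) return;
        if (--write_unprotect_counter_ == 0) MakeExecutable();  // 1 -> 0
      }

     private:
      void MakeWritable() {}    // in spirit: mprotect(..., READ | WRITE)
      void MakeExecutable() {}  // in spirit: mprotect(..., READ | EXEC)
      std::mutex mutex_;
      int write_unprotect_counter_ = 0;
    };
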
     605             : namespace {
     606             : 
     607             : PageAllocator::Permission DefaultWritableCodePermissions() {
     608             :   return FLAG_jitless ? PageAllocator::kReadWrite
     609           0 :                       : PageAllocator::kReadWriteExecute;
     610             : }
     611             : 
     612             : }  // namespace
     613             : 
     614      965180 : MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
     615             :                                      Address area_start, Address area_end,
     616             :                                      Executability executable, Space* owner,
     617             :                                      VirtualMemory reservation) {
     618             :   MemoryChunk* chunk = FromAddress(base);
     619             : 
     620             :   DCHECK_EQ(base, chunk->address());
     621             : 
     622      965180 :   chunk->heap_ = heap;
     623      965180 :   chunk->size_ = size;
     624      965180 :   chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
     625             :   DCHECK(HasHeaderSentinel(area_start));
     626      965180 :   chunk->area_start_ = area_start;
     627      965180 :   chunk->area_end_ = area_end;
     628      965180 :   chunk->flags_ = Flags(NO_FLAGS);
     629             :   chunk->set_owner(owner);
     630             :   chunk->InitializeReservedMemory();
     631      965180 :   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
     632      965180 :   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
     633      965180 :   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
     634             :                                        nullptr);
     635      965180 :   base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
     636             :                                        nullptr);
     637      965180 :   chunk->invalidated_slots_ = nullptr;
     638      965180 :   chunk->skip_list_ = nullptr;
     639             :   chunk->progress_bar_ = 0;
     640      965180 :   chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
     641             :   chunk->set_concurrent_sweeping_state(kSweepingDone);
     642      965180 :   chunk->page_protection_change_mutex_ = new base::Mutex();
     643      965179 :   chunk->write_unprotect_counter_ = 0;
     644      965179 :   chunk->mutex_ = new base::Mutex();
     645      965177 :   chunk->allocated_bytes_ = chunk->area_size();
     646      965177 :   chunk->wasted_memory_ = 0;
     647      965177 :   chunk->young_generation_bitmap_ = nullptr;
     648      965177 :   chunk->marking_bitmap_ = nullptr;
     649      965177 :   chunk->local_tracker_ = nullptr;
     650             : 
     651             :   chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
     652             :       0;
     653             :   chunk->external_backing_store_bytes_
     654             :       [ExternalBackingStoreType::kExternalString] = 0;
     655             : 
     656    12547301 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     657     5791062 :     chunk->categories_[i] = nullptr;
     658             :   }
     659             : 
     660             :   chunk->AllocateMarkingBitmap();
     661      965177 :   if (owner->identity() == RO_SPACE) {
     662             :     heap->incremental_marking()
     663             :         ->non_atomic_marking_state()
     664             :         ->bitmap(chunk)
     665             :         ->MarkAllBits();
     666             :   } else {
     667             :     heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
     668             :                                                                           0);
     669             :   }
     670             : 
     671             :   DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
     672             :   DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
     673             :   DCHECK_EQ(kOwnerOffset, OFFSET_OF(MemoryChunk, owner_));
     674             : 
     675      965177 :   if (executable == EXECUTABLE) {
     676             :     chunk->SetFlag(IS_EXECUTABLE);
     677      131727 :     if (heap->write_protect_code_memory()) {
     678             :       chunk->write_unprotect_counter_ =
     679      131727 :           heap->code_space_memory_modification_scope_depth();
     680             :     } else {
     681             :       size_t page_size = MemoryAllocator::GetCommitPageSize();
     682             :       DCHECK(IsAligned(area_start, page_size));
     683           0 :       size_t area_size = RoundUp(area_end - area_start, page_size);
     684           0 :       CHECK(reservation.SetPermissions(area_start, area_size,
     685             :                                        DefaultWritableCodePermissions()));
     686             :     }
     687             :   }
     688             : 
     689             :   chunk->reservation_ = std::move(reservation);
     690             : 
     691      965179 :   return chunk;
     692             : }
     693             : 
     694      467871 : Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
     695             :   Page* page = static_cast<Page*>(chunk);
     696             :   DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
     697             :                 page->owner()->identity()),
     698             :             page->area_size());
     699             :   // Make sure that categories are initialized before freeing the area.
     700             :   page->ResetAllocatedBytes();
     701             :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
     702      467871 :   page->AllocateFreeListCategories();
     703             :   page->InitializeFreeListCategories();
     704             :   page->list_node().Initialize();
     705             :   page->InitializationMemoryFence();
     706      467873 :   return page;
     707             : }
     708             : 
     709      424780 : Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
     710             :   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
     711             :   bool in_to_space = (id() != kFromSpace);
     712      424780 :   chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
     713             :   Page* page = static_cast<Page*>(chunk);
     714             :   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
     715      424780 :   page->AllocateLocalTracker();
     716             :   page->list_node().Initialize();
     717             : #ifdef ENABLE_MINOR_MC
     718      424782 :   if (FLAG_minor_mc) {
     719             :     page->AllocateYoungGenerationBitmap();
     720             :     heap()
     721             :         ->minor_mark_compact_collector()
     722             :         ->non_atomic_marking_state()
     723           0 :         ->ClearLiveness(page);
     724             :   }
     725             : #endif  // ENABLE_MINOR_MC
     726             :   page->InitializationMemoryFence();
     727      424782 :   return page;
     728             : }
     729             : 
     730       73011 : LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
     731             :                                  Executability executable) {
     732       73011 :   if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
     733             :     STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
     734           0 :     FATAL("Code page is too large.");
     735             :   }
     736             : 
     737             :   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
     738             : 
     739             :   // Initialize the sentinel value for each page boundary since the mutator
     740             :   // may initialize the object starting from its end.
     741             :   Address sentinel = chunk->address() + MemoryChunk::kHeaderSentinelOffset +
     742       73011 :                      MemoryChunk::kPageSize;
     743      725901 :   while (sentinel < chunk->area_end()) {
     744      326445 :     *reinterpret_cast<intptr_t*>(sentinel) = kNullAddress;
     745      326445 :     sentinel += MemoryChunk::kPageSize;
     746             :   }
     747             : 
     748             :   LargePage* page = static_cast<LargePage*>(chunk);
     749             :   page->SetFlag(MemoryChunk::LARGE_PAGE);
     750             :   page->list_node().Initialize();
     751       73011 :   return page;
     752             : }
     753             : 
     754      467871 : void Page::AllocateFreeListCategories() {
     755     6082345 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     756             :     categories_[i] = new FreeListCategory(
     757     5614472 :         reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
     758             :   }
     759      467873 : }
     760             : 
     761         103 : void Page::InitializeFreeListCategories() {
     762     6083688 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     763     2807856 :     categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
     764             :   }
     765         103 : }
     766             : 
     767           0 : void Page::ReleaseFreeListCategories() {
     768    11596532 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     769     5352238 :     if (categories_[i] != nullptr) {
     770     2806826 :       delete categories_[i];
     771     2806830 :       categories_[i] = nullptr;
     772             :     }
     773             :   }
     774           0 : }
     775             : 
     776        1486 : Page* Page::ConvertNewToOld(Page* old_page) {
     777             :   DCHECK(old_page);
     778             :   DCHECK(old_page->InNewSpace());
     779             :   OldSpace* old_space = old_page->heap()->old_space();
     780             :   old_page->set_owner(old_space);
     781             :   old_page->SetFlags(0, static_cast<uintptr_t>(~0));
     782        1486 :   Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
     783        1486 :   old_space->AddPage(new_page);
     784        1486 :   return new_page;
     785             : }
     786             : 
     787       18181 : size_t MemoryChunk::CommittedPhysicalMemory() {
     788       36362 :   if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
     789       14774 :     return size();
     790        3407 :   return high_water_mark_;
     791             : }
     792             : 
     793        9016 : bool MemoryChunk::InOldSpace() const {
     794        9016 :   return owner()->identity() == OLD_SPACE;
     795             : }
     796             : 
     797           0 : bool MemoryChunk::InLargeObjectSpace() const {
     798           0 :   return owner()->identity() == LO_SPACE;
     799             : }
     800             : 
     801      918538 : MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     802             :                                             size_t commit_area_size,
     803             :                                             Executability executable,
     804             :                                             Space* owner) {
     805             :   DCHECK_LE(commit_area_size, reserve_area_size);
     806             : 
     807             :   size_t chunk_size;
     808      918538 :   Heap* heap = isolate_->heap();
     809             :   Address base = kNullAddress;
     810     1837076 :   VirtualMemory reservation;
     811             :   Address area_start = kNullAddress;
     812             :   Address area_end = kNullAddress;
     813             :   void* address_hint =
     814             :       AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
     815             : 
     816             :   //
     817             :   // MemoryChunk layout:
     818             :   //
     819             :   //             Executable
     820             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     821             :   // |           Header           |
     822             :   // +----------------------------+<- base + CodePageGuardStartOffset
     823             :   // |           Guard            |
     824             :   // +----------------------------+<- area_start_
     825             :   // |           Area             |
     826             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     827             :   // |   Committed but not used   |
     828             :   // +----------------------------+<- aligned at OS page boundary
     829             :   // | Reserved but not committed |
     830             :   // +----------------------------+<- aligned at OS page boundary
     831             :   // |           Guard            |
     832             :   // +----------------------------+<- base + chunk_size
     833             :   //
     834             :   //           Non-executable
     835             :   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
     836             :   // |          Header            |
     837             :   // +----------------------------+<- area_start_ (base + area_start_)
     838             :   // |           Area             |
     839             :   // +----------------------------+<- area_end_ (area_start + commit_area_size)
     840             :   // |  Committed but not used    |
     841             :   // +----------------------------+<- aligned at OS page boundary
     842             :   // | Reserved but not committed |
     843             :   // +----------------------------+<- base + chunk_size
     844             :   //
     845             : 
     846      918539 :   if (executable == EXECUTABLE) {
     847      263454 :     chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
     848             :                                reserve_area_size +
     849             :                                MemoryChunkLayout::CodePageGuardSize(),
     850             :                            GetCommitPageSize());
     851             : 
     852             :     // Size of header (not executable) plus area (executable).
     853      131727 :     size_t commit_size = ::RoundUp(
     854             :         MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
     855             :         GetCommitPageSize());
     856             :     base =
     857             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     858      131727 :                               executable, address_hint, &reservation);
     859      131727 :     if (base == kNullAddress) return nullptr;
     860             :     // Update executable memory size.
     861             :     size_executable_ += reservation.size();
     862             : 
     863             :     if (Heap::ShouldZapGarbage()) {
     864             :       ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
     865             :       ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
     866             :                commit_area_size, kZapValue);
     867             :     }
     868             : 
     869      131727 :     area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
     870      131727 :     area_end = area_start + commit_area_size;
     871             :   } else {
     872      786812 :     chunk_size = ::RoundUp(
     873             :         MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
     874             :         GetCommitPageSize());
     875      786812 :     size_t commit_size = ::RoundUp(
     876             :         MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
     877             :         GetCommitPageSize());
     878             :     base =
     879             :         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
     880      786812 :                               executable, address_hint, &reservation);
     881             : 
     882      786812 :     if (base == kNullAddress) return nullptr;
     883             : 
     884             :     if (Heap::ShouldZapGarbage()) {
     885             :       ZapBlock(
     886             :           base,
     887             :           MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
     888             :           kZapValue);
     889             :     }
     890             : 
     891      786812 :     area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
     892      786812 :     area_end = area_start + commit_area_size;
     893             :   }
     894             : 
     895             :   // Use chunk_size for statistics and callbacks because we assume that they
     896             :   // treat reserved but not-yet committed memory regions of chunks as allocated.
     897      918539 :   isolate_->counters()->memory_allocated()->Increment(
     898      918539 :       static_cast<int>(chunk_size));
     899             : 
     900      918539 :   LOG(isolate_,
     901             :       NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
     902             : 
     903             :   // We cannot use the last chunk in the address space because we would
     904             :   // overflow when comparing top and limit if this chunk is used for a
     905             :   // linear allocation area.
     906      918539 :   if ((base + chunk_size) == 0u) {
     907           0 :     CHECK(!last_chunk_.IsReserved());
     908           0 :     last_chunk_.TakeControl(&reservation);
     909           0 :     UncommitMemory(&last_chunk_);
     910             :     size_ -= chunk_size;
     911           0 :     if (executable == EXECUTABLE) {
     912             :       size_executable_ -= chunk_size;
     913             :     }
     914           0 :     CHECK(last_chunk_.IsReserved());
     915             :     return AllocateChunk(reserve_area_size, commit_area_size, executable,
     916           0 :                          owner);
     917             :   }
     918             : 
     919             :   MemoryChunk* chunk =
     920      918537 :       MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
     921      918539 :                               executable, owner, std::move(reservation));
     922             : 
     923      918537 :   if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
     924             :   return chunk;
     925             : }
     926             : 
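Following the executable branch above, the reserved chunk size is the
object-start offset plus the requested area plus one trailing guard page,
rounded up to a commit page. A worked example using the same illustrative
constants as the layout sketch earlier (not the real V8 values):

    #include <cstddef>

    constexpr size_t RoundUpTo2(size_t x, size_t m) {
      return (x + m - 1) / m * m;
    }

    constexpr size_t kCommitPage = 4096;             // assumed commit page size
    constexpr size_t kObjectStartInCodePage = 8192;  // header page + guard page
    constexpr size_t kGuardSize = 4096;              // one trailing guard page

    constexpr size_t CodeChunkSize(size_t reserve_area_size) {
      return RoundUpTo2(
          kObjectStartInCodePage + reserve_area_size + kGuardSize, kCommitPage);
    }

    // A 40 KiB code area needs 8192 + 40960 + 4096 = 53248 reserved bytes.
    static_assert(CodeChunkSize(40 * 1024) == 53248, "worked example");
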
     927      361420 : void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
     928      891715 :   if (is_marking) {
     929             :     SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     930             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     931             :     SetFlag(MemoryChunk::INCREMENTAL_MARKING);
     932             :   } else {
     933             :     ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     934             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     935             :     ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
     936             :   }
     937      361420 : }
     938             : 
     939      332706 : void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
     940             :   SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     941      773147 :   if (is_marking) {
     942             :     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     943             :     SetFlag(MemoryChunk::INCREMENTAL_MARKING);
     944             :   } else {
     945             :     ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
     946             :     ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
     947             :   }
     948      332706 : }
     949             : 
     950     1416945 : void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
     951             : 
     952      435575 : void Page::AllocateLocalTracker() {
     953             :   DCHECK_NULL(local_tracker_);
     954      871152 :   local_tracker_ = new LocalArrayBufferTracker(this);
     955      435577 : }
     956             : 
     957       17927 : bool Page::contains_array_buffers() {
     958       17927 :   return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
     959             : }
     960             : 
     961           0 : void Page::ResetFreeListStatistics() {
     962      492088 :   wasted_memory_ = 0;
     963           0 : }
     964             : 
     965           0 : size_t Page::AvailableInFreeList() {
     966           0 :   size_t sum = 0;
     967             :   ForAllFreeListCategories([&sum](FreeListCategory* category) {
     968           0 :     sum += category->available();
     969             :   });
     970           0 :   return sum;
     971             : }
     972             : 
     973             : #ifdef DEBUG
     974             : namespace {
     975             : // Skips fillers, starting from the given filler, until the end address.
     976             : // Returns the first address after the skipped fillers.
     977             : Address SkipFillers(HeapObject filler, Address end) {
     978             :   Address addr = filler->address();
     979             :   while (addr < end) {
     980             :     filler = HeapObject::FromAddress(addr);
     981             :     CHECK(filler->IsFiller());
     982             :     addr = filler->address() + filler->Size();
     983             :   }
     984             :   return addr;
     985             : }
     986             : }  // anonymous namespace
     987             : #endif  // DEBUG
     988             : 
     989      187032 : size_t Page::ShrinkToHighWaterMark() {
     990             :   // Shrinking only makes sense outside of the CodeRange, where we don't care
     991             :   // about address space fragmentation.
     992             :   VirtualMemory* reservation = reserved_memory();
     993      187032 :   if (!reservation->IsReserved()) return 0;
     994             : 
     995             :   // Shrink pages to high water mark. The water mark points either to a filler
     996             :   // or the area_end.
     997      374064 :   HeapObject filler = HeapObject::FromAddress(HighWaterMark());
     998      187032 :   if (filler->address() == area_end()) return 0;
     999      187023 :   CHECK(filler->IsFiller());
    1000             :   // Ensure that no objects were allocated in the [filler, area_end) region.
    1001             :   DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
    1002             :   // Ensure that no objects will be allocated on this page.
    1003             :   DCHECK_EQ(0u, AvailableInFreeList());
    1004             : 
    1005      187023 :   size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
    1006             :                             MemoryAllocator::GetCommitPageSize());
    1007      187023 :   if (unused > 0) {
    1008             :     DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    1009      186998 :     if (FLAG_trace_gc_verbose) {
    1010           0 :       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
    1011             :                    reinterpret_cast<void*>(this),
    1012             :                    reinterpret_cast<void*>(area_end()),
    1013           0 :                    reinterpret_cast<void*>(area_end() - unused));
    1014             :     }
    1015             :     heap()->CreateFillerObjectAt(
    1016             :         filler->address(),
    1017             :         static_cast<int>(area_end() - filler->address() - unused),
    1018      373996 :         ClearRecordedSlots::kNo);
    1019      373994 :     heap()->memory_allocator()->PartialFreeMemory(
    1020      373994 :         this, address() + size() - unused, unused, area_end() - unused);
    1021      186998 :     if (filler->address() != area_end()) {
    1022      186998 :       CHECK(filler->IsFiller());
    1023      186998 :       CHECK_EQ(filler->address() + filler->Size(), area_end());
    1024             :     }
    1025             :   }
    1026             :   return unused;
    1027             : }
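
A worked standalone example of the RoundDown step above, assuming 4 KiB commit pages (the real value comes from MemoryAllocator::GetCommitPageSize()): only whole commit pages past the high water mark can be returned to the OS, and the remainder is re-covered by a filler:

    #include <cstddef>
    #include <cstdio>

    // Stand-in for RoundDown: round value down to a multiple of alignment.
    static size_t RoundDownTo(size_t value, size_t alignment) {
      return value - value % alignment;
    }

    int main() {
      const size_t kCommitPageSize = 4096;  // assumed commit page size
      size_t tail = 150000;  // bytes between the filler and area_end
      size_t unused = RoundDownTo(tail, kCommitPageSize);
      // Prints "unused = 147456 (36 pages)"; the leftover 2544 bytes stay on
      // the page behind a filler object.
      std::printf("unused = %zu (%zu pages)\n", unused, unused / kCommitPageSize);
      return 0;
    }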
    1028             : 
    1029      159800 : void Page::CreateBlackArea(Address start, Address end) {
    1030             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1031             :   DCHECK_EQ(Page::FromAddress(start), this);
    1032             :   DCHECK_NE(start, end);
    1033             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
    1034             :   IncrementalMarking::MarkingState* marking_state =
    1035             :       heap()->incremental_marking()->marking_state();
    1036      159800 :   marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
    1037      159800 :                                         AddressToMarkbitIndex(end));
    1038      159800 :   marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
    1039      159800 : }
    1040             : 
    1041        6619 : void Page::DestroyBlackArea(Address start, Address end) {
    1042             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1043             :   DCHECK_EQ(Page::FromAddress(start), this);
    1044             :   DCHECK_NE(start, end);
    1045             :   DCHECK_EQ(Page::FromAddress(end - 1), this);
    1046             :   IncrementalMarking::MarkingState* marking_state =
    1047             :       heap()->incremental_marking()->marking_state();
    1048        6619 :   marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
    1049        6619 :                                           AddressToMarkbitIndex(end));
    1050        6619 :   marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
    1051        6619 : }
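
CreateBlackArea and DestroyBlackArea are mirror images: set or clear the mark bits covering [start, end) and adjust the live-byte count by the same delta. A simplified standalone sketch, with an assumed mark-bit granularity and a std::bitset standing in for V8's Bitmap:

    #include <bitset>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kTaggedSize = 8;  // assumed bytes covered per mark bit
      std::bitset<1024> markbits;    // stand-in for the page's marking bitmap
      long live_bytes = 0;

      // CreateBlackArea: set the bits for [start, end) and add live bytes.
      auto create_black_area = [&](size_t start, size_t end) {
        for (size_t i = start / kTaggedSize; i < end / kTaggedSize; i++) {
          markbits.set(i);
        }
        live_bytes += static_cast<long>(end - start);
      };
      // DestroyBlackArea: clear the same bits and subtract the live bytes.
      auto destroy_black_area = [&](size_t start, size_t end) {
        for (size_t i = start / kTaggedSize; i < end / kTaggedSize; i++) {
          markbits.reset(i);
        }
        live_bytes -= static_cast<long>(end - start);
      };

      create_black_area(64, 256);
      std::printf("set: bits=%zu live=%ld\n", markbits.count(), live_bytes);
      destroy_black_area(64, 256);
      std::printf("cleared: bits=%zu live=%ld\n", markbits.count(), live_bytes);
      return 0;
    }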
    1052             : 
    1053      187081 : void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
    1054             :                                         size_t bytes_to_free,
    1055             :                                         Address new_area_end) {
    1056             :   VirtualMemory* reservation = chunk->reserved_memory();
    1057             :   DCHECK(reservation->IsReserved());
    1058      187081 :   chunk->size_ -= bytes_to_free;
    1059      187081 :   chunk->area_end_ = new_area_end;
    1060      187081 :   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    1061             :     // Add guard page at the end.
    1062       62331 :     size_t page_size = GetCommitPageSize();
    1063             :     DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
    1064             :     DCHECK_EQ(chunk->address() + chunk->size(),
    1065             :               chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
    1066       62331 :     reservation->SetPermissions(chunk->area_end_, page_size,
    1067       62331 :                                 PageAllocator::kNoAccess);
    1068             :   }
    1069             :   // On e.g. Windows, a reservation may be larger than a page, and a partial
    1070             :   // release starting at |start_free| will also release the potentially
    1071             :   // unused part behind the current page.
    1072      187081 :   const size_t released_bytes = reservation->Release(start_free);
    1073             :   DCHECK_GE(size_, released_bytes);
    1074             :   size_ -= released_bytes;
    1075      187082 :   isolate_->counters()->memory_allocated()->Decrement(
    1076      187082 :       static_cast<int>(released_bytes));
    1077      187082 : }
    1078             : 
    1079      965060 : void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
    1080             :   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
    1081      965060 :   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
    1082             : 
    1083      965060 :   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
    1084      965060 :                                          chunk->IsEvacuationCandidate());
    1085             : 
    1086             :   VirtualMemory* reservation = chunk->reserved_memory();
    1087             :   const size_t size =
    1088      965060 :       reservation->IsReserved() ? reservation->size() : chunk->size();
    1089             :   DCHECK_GE(size_, static_cast<size_t>(size));
    1090             :   size_ -= size;
    1091     1930120 :   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
    1092      965060 :   if (chunk->executable() == EXECUTABLE) {
    1093             :     DCHECK_GE(size_executable_, size);
    1094             :     size_executable_ -= size;
    1095             :   }
    1096             : 
    1097             :   chunk->SetFlag(MemoryChunk::PRE_FREED);
    1098             : 
    1099      965060 :   if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
    1100      965060 : }
    1101             : 
    1102             : 
    1103      944515 : void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
    1104             :   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
    1105      944515 :   chunk->ReleaseAllocatedMemory();
    1106             : 
    1107             :   VirtualMemory* reservation = chunk->reserved_memory();
    1108      944514 :   if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    1109      402724 :     UncommitMemory(reservation);
    1110             :   } else {
    1111      541790 :     if (reservation->IsReserved()) {
    1112      479363 :       reservation->Free();
    1113             :     } else {
    1114             :       // Only read-only pages can have a non-initialized reservation object.
    1115             :       DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
    1116             :       FreeMemory(page_allocator(chunk->executable()), chunk->address(),
    1117       62427 :                  chunk->size());
    1118             :     }
    1119             :   }
    1120      944502 : }
    1121             : 
    1122             : template <MemoryAllocator::FreeMode mode>
    1123     1341687 : void MemoryAllocator::Free(MemoryChunk* chunk) {
    1124             :   switch (mode) {
    1125             :     case kFull:
    1126      508675 :       PreFreeMemory(chunk);
    1127      508675 :       PerformFreeMemory(chunk);
    1128             :       break;
    1129             :     case kAlreadyPooled:
    1130             :       // Pooled pages cannot be touched anymore as their memory is uncommitted.
    1131             :       // Pooled pages are non-executable.
    1132      376627 :       FreeMemory(data_page_allocator(), chunk->address(),
    1133             :                  static_cast<size_t>(MemoryChunk::kPageSize));
    1134             :       break;
    1135             :     case kPooledAndQueue:
    1136             :       DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
    1137             :       DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
    1138             :       chunk->SetFlag(MemoryChunk::POOLED);
    1139             :       V8_FALLTHROUGH;
    1140             :     case kPreFreeAndQueue:
    1141      456385 :       PreFreeMemory(chunk);
    1142             :       // The chunks added to this queue will be freed by a concurrent thread.
    1143      456385 :       unmapper()->AddMemoryChunkSafe(chunk);
    1144             :       break;
    1145             :   }
    1146     1341687 : }
    1147             : 
    1148             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1149             :     MemoryAllocator::kFull>(MemoryChunk* chunk);
    1150             : 
    1151             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1152             :     MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
    1153             : 
    1154             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1155             :     MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
    1156             : 
    1157             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    1158             :     MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
    1159             : 
    1160             : template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
    1161      891166 : Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
    1162             :                                     Executability executable) {
    1163             :   MemoryChunk* chunk = nullptr;
    1164             :   if (alloc_mode == kPooled) {
    1165             :     DCHECK_EQ(size, static_cast<size_t>(
    1166             :                         MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    1167             :                             owner->identity())));
    1168             :     DCHECK_EQ(executable, NOT_EXECUTABLE);
    1169      424781 :     chunk = AllocatePagePooled(owner);
    1170             :   }
    1171      424782 :   if (chunk == nullptr) {
    1172      844526 :     chunk = AllocateChunk(size, size, executable, owner);
    1173             :   }
    1174      891168 :   if (chunk == nullptr) return nullptr;
    1175      891167 :   return owner->InitializePage(chunk, executable);
    1176             : }
    1177             : 
    1178             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1179             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    1180             :         size_t size, PagedSpace* owner, Executability executable);
    1181             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1182             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    1183             :         size_t size, SemiSpace* owner, Executability executable);
    1184             : template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    1185             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    1186             :         size_t size, SemiSpace* owner, Executability executable);
    1187             : 
    1188       73011 : LargePage* MemoryAllocator::AllocateLargePage(size_t size,
    1189             :                                               LargeObjectSpace* owner,
    1190             :                                               Executability executable) {
    1191       73011 :   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
    1192       73011 :   if (chunk == nullptr) return nullptr;
    1193      146022 :   return LargePage::Initialize(isolate_->heap(), chunk, executable);
    1194             : }
    1195             : 
    1196             : template <typename SpaceType>
    1197      424782 : MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
    1198      424782 :   MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
    1199      424782 :   if (chunk == nullptr) return nullptr;
    1200             :   const int size = MemoryChunk::kPageSize;
    1201       46641 :   const Address start = reinterpret_cast<Address>(chunk);
    1202             :   const Address area_start =
    1203             :       start +
    1204       46641 :       MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
    1205       46641 :   const Address area_end = start + size;
    1206             :   // Pooled pages are always regular data pages.
    1207             :   DCHECK_NE(CODE_SPACE, owner->identity());
    1208       46641 :   VirtualMemory reservation(data_page_allocator(), start, size);
    1209       46641 :   if (!CommitMemory(&reservation)) return nullptr;
    1210             :   if (Heap::ShouldZapGarbage()) {
    1211             :     ZapBlock(start, size, kZapValue);
    1212             :   }
    1213       93282 :   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
    1214             :                           NOT_EXECUTABLE, owner, std::move(reservation));
    1215             :   size_ += size;
    1216       46641 :   return chunk;
    1217             : }
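
A minimal sketch of the pooling fast path above, with stand-in types: take an uncommitted chunk from the pool and recommit it, returning nullptr (so that AllocatePage falls back to a fresh AllocateChunk) when the pool is empty:

    #include <cstdio>
    #include <vector>

    struct ChunkStub { bool committed = false; };  // stand-in for MemoryChunk

    static std::vector<ChunkStub*> g_pool;  // stand-in for the unmapper's pool

    ChunkStub* AllocatePooled() {
      if (g_pool.empty()) return nullptr;  // caller falls back to AllocateChunk
      ChunkStub* chunk = g_pool.back();
      g_pool.pop_back();
      chunk->committed = true;  // corresponds to CommitMemory(&reservation)
      return chunk;
    }

    int main() {
      ChunkStub spare;
      g_pool.push_back(&spare);
      std::printf("pool hit: %d\n", AllocatePooled() != nullptr);   // 1
      std::printf("pool miss: %d\n", AllocatePooled() != nullptr);  // 0
      return 0;
    }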
    1218             : 
    1219           0 : void MemoryAllocator::ZapBlock(Address start, size_t size,
    1220             :                                uintptr_t zap_value) {
    1221             :   DCHECK(IsAligned(start, kTaggedSize));
    1222             :   DCHECK(IsAligned(size, kTaggedSize));
    1223           0 :   MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
    1224             :                size >> kTaggedSizeLog2);
    1225           0 : }
    1226             : 
    1227           5 : intptr_t MemoryAllocator::GetCommitPageSize() {
    1228    26231287 :   if (FLAG_v8_os_page_size != 0) {
    1229             :     DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
    1230         926 :     return FLAG_v8_os_page_size * KB;
    1231             :   } else {
    1232    26230361 :     return CommitPageSize();
    1233             :   }
    1234             : }
    1235             : 
    1236     2780680 : base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
    1237             :                                                               size_t size) {
    1238     2782362 :   size_t page_size = MemoryAllocator::GetCommitPageSize();
    1239     2782362 :   if (size < page_size + FreeSpace::kSize) {
    1240     2752639 :     return base::AddressRegion(0, 0);
    1241             :   }
    1242       29723 :   Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
    1243       29723 :   Address discardable_end = RoundDown(addr + size, page_size);
    1244       29723 :   if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
    1245             :   return base::AddressRegion(discardable_start,
    1246       27581 :                              discardable_end - discardable_start);
    1247             : }
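
A worked standalone example of the computation above, with an assumed 4 KiB page size and a hypothetical FreeSpace::kSize of 24 bytes: the free-space header must stay accessible, so the region is rounded inward to whole pages and may come out empty:

    #include <cstdint>
    #include <cstdio>

    static uintptr_t RoundUpTo(uintptr_t v, uintptr_t a) { return (v + a - 1) / a * a; }
    static uintptr_t RoundDownTo(uintptr_t v, uintptr_t a) { return v / a * a; }

    int main() {
      const uintptr_t kPageSize = 4096;     // assumed commit page size
      const uintptr_t kFreeSpaceSize = 24;  // hypothetical FreeSpace::kSize
      uintptr_t addr = 0x10100;
      uintptr_t size = 3 * kPageSize;
      // Keep the free-space header, then round inward to whole pages.
      uintptr_t start = RoundUpTo(addr + kFreeSpaceSize, kPageSize);  // 0x11000
      uintptr_t end = RoundDownTo(addr + size, kPageSize);            // 0x13000
      if (start >= end) {
        std::puts("nothing discardable");
      } else {
        std::printf("discard [0x%jx, 0x%jx), %ju bytes\n",  // 8192 bytes
                    static_cast<uintmax_t>(start), static_cast<uintmax_t>(end),
                    static_cast<uintmax_t>(end - start));
      }
      return 0;
    }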
    1248             : 
    1249      131727 : bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
    1250             :                                              size_t commit_size,
    1251             :                                              size_t reserved_size) {
    1252      131727 :   const size_t page_size = GetCommitPageSize();
    1253             :   // All addresses and sizes must be aligned to the commit page size.
    1254             :   DCHECK(IsAligned(start, page_size));
    1255             :   DCHECK_EQ(0, commit_size % page_size);
    1256             :   DCHECK_EQ(0, reserved_size % page_size);
    1257             :   const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
    1258             :   const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
    1259             :   const size_t code_area_offset =
    1260      131727 :       MemoryChunkLayout::ObjectStartOffsetInCodePage();
    1261             :   // reserved_size includes two guard regions, commit_size does not.
    1262             :   DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
    1263      131727 :   const Address pre_guard_page = start + pre_guard_offset;
    1264      131727 :   const Address code_area = start + code_area_offset;
    1265      131727 :   const Address post_guard_page = start + reserved_size - guard_size;
    1266             :   // Commit the non-executable header, from start to pre-code guard page.
    1267      131727 :   if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    1268             :     // Create the pre-code guard page, following the header.
    1269      131727 :     if (vm->SetPermissions(pre_guard_page, page_size,
    1270             :                            PageAllocator::kNoAccess)) {
    1271             :       // Commit the executable code body.
    1272      131726 :       if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
    1273             :                              PageAllocator::kReadWrite)) {
    1274             :         // Create the post-code guard page.
    1275      131727 :         if (vm->SetPermissions(post_guard_page, page_size,
    1276             :                                PageAllocator::kNoAccess)) {
    1277      131727 :           UpdateAllocatedSpaceLimits(start, code_area + commit_size);
    1278      131727 :           return true;
    1279             :         }
    1280           0 :         vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
    1281             :       }
    1282             :     }
    1283           0 :     vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
    1284             :   }
    1285             :   return false;
    1286             : }
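
The nested SetPermissions calls above carve one fixed layout out of the reservation. A standalone sketch that prints that layout with assumed sizes (the real offsets come from MemoryChunkLayout and GetCommitPageSize()):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kPageSize = 4096;             // assumed commit page size
      const size_t kGuardSize = kPageSize;       // one guard page on each side
      const size_t kPreGuardOffset = kPageSize;  // hypothetical header size
      const size_t kCodeAreaOffset = kPreGuardOffset + kGuardSize;
      const size_t kReservedSize = 64 * kPageSize;
      std::printf("header     [0x%05zx, 0x%05zx)  rw\n", size_t{0},
                  kPreGuardOffset);
      std::printf("pre guard  [0x%05zx, 0x%05zx)  no access\n", kPreGuardOffset,
                  kCodeAreaOffset);
      std::printf("code area  [0x%05zx, 0x%05zx)  rw, later rx\n",
                  kCodeAreaOffset, kReservedSize - kGuardSize);
      std::printf("post guard [0x%05zx, 0x%05zx)  no access\n",
                  kReservedSize - kGuardSize, kReservedSize);
      return 0;
    }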
    1287             : 
    1288             : 
    1289             : // -----------------------------------------------------------------------------
    1290             : // MemoryChunk implementation
    1291             : 
    1292      965059 : void MemoryChunk::ReleaseAllocatedMemory() {
    1293      965059 :   if (skip_list_ != nullptr) {
    1294       91032 :     delete skip_list_;
    1295       91032 :     skip_list_ = nullptr;
    1296             :   }
    1297      965059 :   if (mutex_ != nullptr) {
    1298      902632 :     delete mutex_;
    1299      902625 :     mutex_ = nullptr;
    1300             :   }
    1301      965052 :   if (page_protection_change_mutex_ != nullptr) {
    1302      965053 :     delete page_protection_change_mutex_;
    1303      965055 :     page_protection_change_mutex_ = nullptr;
    1304             :   }
    1305      965054 :   ReleaseSlotSet<OLD_TO_NEW>();
    1306      965054 :   ReleaseSlotSet<OLD_TO_OLD>();
    1307      965046 :   ReleaseTypedSlotSet<OLD_TO_NEW>();
    1308      965043 :   ReleaseTypedSlotSet<OLD_TO_OLD>();
    1309      965040 :   ReleaseInvalidatedSlots();
    1310      965037 :   if (local_tracker_ != nullptr) ReleaseLocalTracker();
    1311      965059 :   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
    1312      965059 :   if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
    1313             : 
    1314      965059 :   if (!IsLargePage()) {
    1315             :     Page* page = static_cast<Page*>(this);
    1316             :     page->ReleaseFreeListCategories();
    1317             :   }
    1318      965063 : }
    1319             : 
    1320      105246 : static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
    1321      105246 :   size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
    1322             :   DCHECK_LT(0, pages);
    1323      105246 :   SlotSet* slot_set = new SlotSet[pages];
    1324      325715 :   for (size_t i = 0; i < pages; i++) {
    1325      110232 :     slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
    1326             :   }
    1327      105251 :   return slot_set;
    1328             : }
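
The pages computation above is the standard ceiling-division idiom, so a chunk gets one SlotSet bucket per page it touches, including a trailing partial page. A quick standalone check with an assumed 256 KiB page size:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kPageSize = 262144;  // assumed 256 KiB Page::kPageSize
      const size_t sizes[] = {262144, 262145, 1048576};
      for (size_t size : sizes) {
        size_t pages = (size + kPageSize - 1) / kPageSize;  // ceiling division
        std::printf("size=%zu -> pages=%zu\n", size, pages);  // 1, 2, 4
      }
      return 0;
    }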
    1329             : 
    1330             : template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
    1331             : template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
    1332             : 
    1333             : template <RememberedSetType type>
    1334      105247 : SlotSet* MemoryChunk::AllocateSlotSet() {
    1335      105247 :   SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
    1336      105251 :   SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
    1337             :       &slot_set_[type], nullptr, slot_set);
    1338      105251 :   if (old_slot_set != nullptr) {
    1339          61 :     delete[] slot_set;
    1340             :     slot_set = old_slot_set;
    1341             :   }
    1342             :   DCHECK(slot_set);
    1343      105251 :   return slot_set;
    1344             : }
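
The Release_CompareAndSwap above is lock-free lazy initialization: allocate eagerly, publish with a CAS, and let a losing thread delete its copy and adopt the winner's. A sketch of the same idiom with std::atomic standing in for base::AsAtomicPointer:

    #include <atomic>

    struct SlotSetStub {};  // stand-in for SlotSet

    SlotSetStub* LazyAllocate(std::atomic<SlotSetStub*>* slot) {
      SlotSetStub* fresh = new SlotSetStub();
      SlotSetStub* expected = nullptr;
      if (!slot->compare_exchange_strong(expected, fresh,
                                         std::memory_order_acq_rel)) {
        delete fresh;     // another thread installed its set first
        return expected;  // compare_exchange_strong stored the winner here
      }
      return fresh;
    }

    int main() {
      std::atomic<SlotSetStub*> slot{nullptr};
      SlotSetStub* first = LazyAllocate(&slot);
      SlotSetStub* second = LazyAllocate(&slot);  // adopts the installed set
      // first == second, so there is exactly one set to delete.
      (void)second;
      delete first;
      return 0;
    }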
    1345             : 
    1346             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
    1347             : template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
    1348             : 
    1349             : template <RememberedSetType type>
    1350     1944677 : void MemoryChunk::ReleaseSlotSet() {
    1351     1944677 :   SlotSet* slot_set = slot_set_[type];
    1352     1944677 :   if (slot_set) {
    1353      105180 :     slot_set_[type] = nullptr;
    1354      105180 :     delete[] slot_set;
    1355             :   }
    1356     1944684 : }
    1357             : 
    1358             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
    1359             : template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
    1360             : 
    1361             : template <RememberedSetType type>
    1362        9591 : TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
    1363        9591 :   TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
    1364        9591 :   TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
    1365             :       &typed_slot_set_[type], nullptr, typed_slot_set);
    1366        9591 :   if (old_value != nullptr) {
    1367           0 :     delete typed_slot_set;
    1368             :     typed_slot_set = old_value;
    1369             :   }
    1370             :   DCHECK(typed_slot_set);
    1371        9591 :   return typed_slot_set;
    1372             : }
    1373             : 
    1374             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
    1375             : template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
    1376             : 
    1377             : template <RememberedSetType type>
    1378     1933523 : void MemoryChunk::ReleaseTypedSlotSet() {
    1379     1933523 :   TypedSlotSet* typed_slot_set = typed_slot_set_[type];
    1380     1933523 :   if (typed_slot_set) {
    1381        9591 :     typed_slot_set_[type] = nullptr;
    1382        9591 :     delete typed_slot_set;
    1383             :   }
    1384     1933523 : }
    1385             : 
    1386         255 : InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
    1387             :   DCHECK_NULL(invalidated_slots_);
    1388         510 :   invalidated_slots_ = new InvalidatedSlots();
    1389         255 :   return invalidated_slots_;
    1390             : }
    1391             : 
    1392      966224 : void MemoryChunk::ReleaseInvalidatedSlots() {
    1393      966224 :   if (invalidated_slots_) {
    1394         510 :     delete invalidated_slots_;
    1395         255 :     invalidated_slots_ = nullptr;
    1396             :   }
    1397      966224 : }
    1398             : 
    1399       26893 : void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
    1400             :                                                      int size) {
    1401       26893 :   if (!ShouldSkipEvacuationSlotRecording()) {
    1402       21776 :     if (invalidated_slots() == nullptr) {
    1403         255 :       AllocateInvalidatedSlots();
    1404             :     }
    1405       21776 :     int old_size = (*invalidated_slots())[object];
    1406       43552 :     (*invalidated_slots())[object] = std::max(old_size, size);
    1407             :   }
    1408       26893 : }
    1409             : 
    1410           0 : bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
    1411           0 :   if (ShouldSkipEvacuationSlotRecording()) {
    1412             :     // Invalidated slots do not matter if we are not recording slots.
    1413             :     return true;
    1414             :   }
    1415           0 :   if (invalidated_slots() == nullptr) {
    1416             :     return false;
    1417             :   }
    1418             :   return invalidated_slots()->find(object) != invalidated_slots()->end();
    1419             : }
    1420             : 
    1421           5 : void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
    1422             :                                                  HeapObject new_start) {
    1423             :   DCHECK_LT(old_start, new_start);
    1424             :   DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
    1425             :             MemoryChunk::FromHeapObject(new_start));
    1426           5 :   if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
    1427             :     auto it = invalidated_slots()->find(old_start);
    1428           0 :     if (it != invalidated_slots()->end()) {
    1429           0 :       int old_size = it->second;
    1430           0 :       int delta = static_cast<int>(new_start->address() - old_start->address());
    1431             :       invalidated_slots()->erase(it);
    1432           0 :       (*invalidated_slots())[new_start] = old_size - delta;
    1433             :     }
    1434             :   }
    1435           5 : }
    1436             : 
    1437      435495 : void MemoryChunk::ReleaseLocalTracker() {
    1438             :   DCHECK_NOT_NULL(local_tracker_);
    1439      435495 :   delete local_tracker_;
    1440      435515 :   local_tracker_ = nullptr;
    1441      435515 : }
    1442             : 
    1443           0 : void MemoryChunk::AllocateYoungGenerationBitmap() {
    1444             :   DCHECK_NULL(young_generation_bitmap_);
    1445           0 :   young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1446           0 : }
    1447             : 
    1448           0 : void MemoryChunk::ReleaseYoungGenerationBitmap() {
    1449             :   DCHECK_NOT_NULL(young_generation_bitmap_);
    1450           0 :   free(young_generation_bitmap_);
    1451           0 :   young_generation_bitmap_ = nullptr;
    1452           0 : }
    1453             : 
    1454           0 : void MemoryChunk::AllocateMarkingBitmap() {
    1455             :   DCHECK_NULL(marking_bitmap_);
    1456      965177 :   marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
    1457           0 : }
    1458             : 
    1459           0 : void MemoryChunk::ReleaseMarkingBitmap() {
    1460             :   DCHECK_NOT_NULL(marking_bitmap_);
    1461      965059 :   free(marking_bitmap_);
    1462      965059 :   marking_bitmap_ = nullptr;
    1463           0 : }
    1464             : 
    1465             : // -----------------------------------------------------------------------------
    1466             : // Space and PagedSpace implementation
    1467             : 
    1468           0 : void Space::CheckOffsetsAreConsistent() const {
    1469             :   static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
    1470             :                 "ID offset inconsistent");
    1471             :   DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
    1472           0 : }
    1473             : 
    1474       91780 : void Space::AddAllocationObserver(AllocationObserver* observer) {
    1475      307211 :   allocation_observers_.push_back(observer);
    1476      307211 :   StartNextInlineAllocationStep();
    1477       91780 : }
    1478             : 
    1479      264619 : void Space::RemoveAllocationObserver(AllocationObserver* observer) {
    1480             :   auto it = std::find(allocation_observers_.begin(),
    1481      264619 :                       allocation_observers_.end(), observer);
    1482             :   DCHECK(allocation_observers_.end() != it);
    1483      264619 :   allocation_observers_.erase(it);
    1484      264619 :   StartNextInlineAllocationStep();
    1485      264619 : }
    1486             : 
    1487      775872 : void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
    1488             : 
    1489      290952 : void Space::ResumeAllocationObservers() {
    1490      775872 :   allocation_observers_paused_ = false;
    1491      290952 : }
    1492             : 
    1493   126174502 : void Space::AllocationStep(int bytes_since_last, Address soon_object,
    1494             :                            int size) {
    1495   126174502 :   if (!AllocationObserversActive()) {
    1496             :     return;
    1497             :   }
    1498             : 
    1499             :   DCHECK(!heap()->allocation_step_in_progress());
    1500             :   heap()->set_allocation_step_in_progress(true);
    1501    23184788 :   heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
    1502    46608114 :   for (AllocationObserver* observer : allocation_observers_) {
    1503    23423364 :     observer->AllocationStep(bytes_since_last, soon_object, size);
    1504             :   }
    1505             :   heap()->set_allocation_step_in_progress(false);
    1506             : }
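
A minimal sketch of the notification loop above; the interfaces are stand-ins, and the real AllocationObserver::AllocationStep also receives the address of the soon-to-be-allocated object:

    #include <cstdio>
    #include <vector>

    struct Observer {
      virtual ~Observer() = default;
      virtual void AllocationStep(int bytes_since_last, int object_size) = 0;
    };

    struct SamplingObserver : Observer {
      void AllocationStep(int bytes_since_last, int object_size) override {
        std::printf("step: %d bytes since last, next object %d bytes\n",
                    bytes_since_last, object_size);
      }
    };

    int main() {
      SamplingObserver sampler;
      std::vector<Observer*> observers = {&sampler};
      for (Observer* observer : observers) {
        observer->AllocationStep(4096, 32);  // one allocation-step broadcast
      }
      return 0;
    }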
    1507             : 
    1508           0 : intptr_t Space::GetNextInlineAllocationStepSize() {
    1509             :   intptr_t next_step = 0;
    1510    46320213 :   for (AllocationObserver* observer : allocation_observers_) {
    1511             :     next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
    1512    23269304 :                           : observer->bytes_to_next_step();
    1513             :   }
    1514             :   DCHECK(allocation_observers_.size() == 0 || next_step > 0);
    1515           0 :   return next_step;
    1516             : }
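
The loop above is a running minimum, so the observer with the fewest bytes remaining until its next step bounds the step size. A standalone check with assumed per-observer counters:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Assumed bytes_to_next_step() values for three observers.
      long steps[] = {512 * 1024, 64 * 1024, 1024 * 1024};
      long next_step = 0;
      for (long step : steps) {
        next_step = next_step ? std::min(next_step, step) : step;
      }
      std::printf("next step = %ld bytes\n", next_step);  // 65536
      return 0;
    }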
    1517             : 
    1518      504704 : PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
    1519             :                        Executability executable)
    1520     1514114 :     : SpaceWithLinearArea(heap, space), executable_(executable) {
    1521      504704 :   area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
    1522             :   accounting_stats_.Clear();
    1523      504704 : }
    1524             : 
    1525      504645 : void PagedSpace::TearDown() {
    1526     1397781 :   while (!memory_chunk_list_.Empty()) {
    1527             :     MemoryChunk* chunk = memory_chunk_list_.front();
    1528             :     memory_chunk_list_.Remove(chunk);
    1529      446568 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
    1530             :   }
    1531             :   accounting_stats_.Clear();
    1532      504645 : }
    1533             : 
    1534      355721 : void PagedSpace::RefillFreeList() {
    1535             :   // Any PagedSpace might invoke RefillFreeList. We filter out all spaces
    1536             :   // except the old-generation spaces.
    1537      517931 :   if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
    1538      355721 :       identity() != MAP_SPACE && identity() != RO_SPACE) {
    1539             :     return;
    1540             :   }
    1541             :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    1542             :   size_t added = 0;
    1543             :   {
    1544             :     Page* p = nullptr;
    1545      827991 :     while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
    1546             :       // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
    1547             :       // entries here to make them unavailable for allocations.
    1548      472331 :       if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
    1549             :         p->ForAllFreeListCategories(
    1550             :             [](FreeListCategory* category) { category->Reset(); });
    1551             :       }
    1552             :       // Pages can actually change ownership only during compaction. This is
    1553             :       // safe because no other action competes for the page links during
    1554             :       // compaction.
    1555      472331 :       if (is_local()) {
    1556             :         DCHECK_NE(this, p->owner());
    1557             :         PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
    1558             :         base::MutexGuard guard(owner->mutex());
    1559       39853 :         owner->RefineAllocatedBytesAfterSweeping(p);
    1560       39853 :         owner->RemovePage(p);
    1561       39853 :         added += AddPage(p);
    1562             :       } else {
    1563             :         base::MutexGuard guard(mutex());
    1564             :         DCHECK_EQ(this, p->owner());
    1565      432478 :         RefineAllocatedBytesAfterSweeping(p);
    1566      432478 :         added += RelinkFreeListCategories(p);
    1567             :       }
    1568      472331 :       added += p->wasted_memory();
    1569      472331 :       if (is_local() && (added > kCompactionMemoryWanted)) break;
    1570             :     }
    1571             :   }
    1572             : }
    1573             : 
    1574      254927 : void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
    1575             :   base::MutexGuard guard(mutex());
    1576             : 
    1577             :   DCHECK(identity() == other->identity());
    1578             :   // Unmerged fields:
    1579             :   //   area_size_
    1580      254927 :   other->FreeLinearAllocationArea();
    1581             : 
    1582             :   // The linear allocation area of {other} should be destroyed now.
    1583             :   DCHECK_EQ(kNullAddress, other->top());
    1584             :   DCHECK_EQ(kNullAddress, other->limit());
    1585             : 
    1586             :   // Move over pages.
    1587      465523 :   for (auto it = other->begin(); it != other->end();) {
    1588             :     Page* p = *(it++);
    1589             :     // Relinking requires the category to be unlinked.
    1590      105298 :     other->RemovePage(p);
    1591      105298 :     AddPage(p);
    1592             :     DCHECK_IMPLIES(
    1593             :         !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
    1594             :         p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
    1595             :   }
    1596             :   DCHECK_EQ(0u, other->Size());
    1597             :   DCHECK_EQ(0u, other->Capacity());
    1598      254927 : }
    1599             : 
    1600             : 
    1601        1004 : size_t PagedSpace::CommittedPhysicalMemory() {
    1602        1004 :   if (!base::OS::HasLazyCommits()) return CommittedMemory();
    1603        1004 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1604             :   size_t size = 0;
    1605        3043 :   for (Page* page : *this) {
    1606        2039 :     size += page->CommittedPhysicalMemory();
    1607             :   }
    1608             :   return size;
    1609             : }
    1610             : 
    1611          20 : bool PagedSpace::ContainsSlow(Address addr) {
    1612             :   Page* p = Page::FromAddress(addr);
    1613         505 :   for (Page* page : *this) {
    1614         500 :     if (page == p) return true;
    1615             :   }
    1616             :   return false;
    1617             : }
    1618             : 
    1619      472331 : void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
    1620      472331 :   CHECK(page->SweepingDone());
    1621             :   auto marking_state =
    1622             :       heap()->incremental_marking()->non_atomic_marking_state();
    1623             :   // The live bytes on the page were accounted for in the space's
    1624             :   // allocated-bytes counter. After sweeping, allocated_bytes() contains the
    1625             :   // accurate live byte count on the page.
    1626      472331 :   size_t old_counter = marking_state->live_bytes(page);
    1627             :   size_t new_counter = page->allocated_bytes();
    1628             :   DCHECK_GE(old_counter, new_counter);
    1629      472331 :   if (old_counter > new_counter) {
    1630       11928 :     DecreaseAllocatedBytes(old_counter - new_counter, page);
    1631             :     // Give the heap a chance to adjust counters in response to the
    1632             :     // more precise and smaller old generation size.
    1633             :     heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
    1634             :   }
    1635             :   marking_state->SetLiveBytes(page, 0);
    1636      472331 : }
    1637             : 
    1638       63561 : Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
    1639             :   base::MutexGuard guard(mutex());
    1640             :   // Check for pages that still contain free list entries, from the largest
    1641             :   // category down; bail out once the categories are too small for the request.
    1642             :   const int minimum_category =
    1643      127184 :       static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
    1644             :   Page* page = free_list()->GetPageForCategoryType(kHuge);
    1645       63592 :   if (!page && static_cast<int>(kLarge) >= minimum_category)
    1646             :     page = free_list()->GetPageForCategoryType(kLarge);
    1647       63592 :   if (!page && static_cast<int>(kMedium) >= minimum_category)
    1648             :     page = free_list()->GetPageForCategoryType(kMedium);
    1649       63592 :   if (!page && static_cast<int>(kSmall) >= minimum_category)
    1650             :     page = free_list()->GetPageForCategoryType(kSmall);
    1651       63592 :   if (!page && static_cast<int>(kTiny) >= minimum_category)
    1652             :     page = free_list()->GetPageForCategoryType(kTiny);
    1653       63592 :   if (!page && static_cast<int>(kTiniest) >= minimum_category)
    1654             :     page = free_list()->GetPageForCategoryType(kTiniest);
    1655       63592 :   if (!page) return nullptr;
    1656       25484 :   RemovePage(page);
    1657       25484 :   return page;
    1658             : }
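
A sketch of the fallback order above, with hypothetical category thresholds (V8's real boundaries live in FreeList): the search starts at kHuge and never descends below the smallest category that could still satisfy the request:

    #include <cstddef>
    #include <cstdio>

    enum Category { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };

    // Hypothetical thresholds, for illustration only.
    Category SelectFreeListCategoryType(size_t size_in_bytes) {
      if (size_in_bytes > 32768) return kHuge;
      if (size_in_bytes > 4096) return kLarge;
      if (size_in_bytes > 256) return kMedium;
      if (size_in_bytes > 32) return kSmall;
      if (size_in_bytes > 16) return kTiny;
      return kTiniest;
    }

    int main() {
      size_t request = 1024;
      int minimum = SelectFreeListCategoryType(request);  // kMedium
      for (int category = kHuge; category >= minimum; category--) {
        std::printf("try category %d\n", category);  // 5, 4, 3
      }
      return 0;
    }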
    1659             : 
    1660      638495 : size_t PagedSpace::AddPage(Page* page) {
    1661      638495 :   CHECK(page->SweepingDone());
    1662      638495 :   page->set_owner(this);
    1663             :   memory_chunk_list_.PushBack(page);
    1664             :   AccountCommitted(page->size());
    1665             :   IncreaseCapacity(page->area_size());
    1666             :   IncreaseAllocatedBytes(page->allocated_bytes(), page);
    1667     3192477 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    1668     1276991 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    1669     1276991 :     IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    1670             :   }
    1671      638496 :   return RelinkFreeListCategories(page);
    1672             : }
    1673             : 
    1674      170635 : void PagedSpace::RemovePage(Page* page) {
    1675      170635 :   CHECK(page->SweepingDone());
    1676             :   memory_chunk_list_.Remove(page);
    1677             :   UnlinkFreeListCategories(page);
    1678             :   DecreaseAllocatedBytes(page->allocated_bytes(), page);
    1679             :   DecreaseCapacity(page->area_size());
    1680             :   AccountUncommitted(page->size());
    1681      853175 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    1682      341270 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    1683      341270 :     DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    1684             :   }
    1685      170635 : }
    1686             : 
    1687      187032 : size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
    1688      187032 :   size_t unused = page->ShrinkToHighWaterMark();
    1689             :   accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    1690             :   AccountUncommitted(unused);
    1691      187033 :   return unused;
    1692             : }
    1693             : 
    1694         400 : void PagedSpace::ResetFreeList() {
    1695      376645 :   for (Page* page : *this) {
    1696      189253 :     free_list_.EvictFreeListItems(page);
    1697             :   }
    1698             :   DCHECK(free_list_.IsEmpty());
    1699         400 : }
    1700             : 
    1701      186993 : void PagedSpace::ShrinkImmortalImmovablePages() {
    1702             :   DCHECK(!heap()->deserialization_complete());
    1703      186993 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    1704      186993 :   FreeLinearAllocationArea();
    1705             :   ResetFreeList();
    1706      374005 :   for (Page* page : *this) {
    1707             :     DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    1708      187012 :     ShrinkPageToHighWaterMark(page);
    1709             :   }
    1710      186993 : }
    1711             : 
    1712      478230 : bool PagedSpace::Expand() {
    1713             :   // Always lock against the main space as we can only adjust capacity and
    1714             :   // pages concurrently for the main paged space.
    1715      478230 :   base::MutexGuard guard(heap()->paged_space(identity())->mutex());
    1716             : 
    1717             :   const int size = AreaSize();
    1718             : 
    1719      478234 :   if (!heap()->CanExpandOldGeneration(size)) return false;
    1720             : 
    1721             :   Page* page =
    1722      466375 :       heap()->memory_allocator()->AllocatePage(size, this, executable());
    1723      466373 :   if (page == nullptr) return false;
    1724             :   // Pages created during bootstrapping may contain immortal immovable objects.
    1725      466373 :   if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
    1726      466373 :   AddPage(page);
    1727             :   Free(page->area_start(), page->area_size(),
    1728      466375 :        SpaceAccountingMode::kSpaceAccounted);
    1729      466375 :   heap()->NotifyOldGenerationExpansion();
    1730      466374 :   return true;
    1731             : }
    1732             : 
    1733             : 
    1734      147855 : int PagedSpace::CountTotalPages() {
    1735             :   int count = 0;
    1736      485005 :   for (Page* page : *this) {
    1737      337150 :     count++;
    1738             :     USE(page);
    1739             :   }
    1740      147855 :   return count;
    1741             : }
    1742             : 
    1743             : 
    1744      206538 : void PagedSpace::ResetFreeListStatistics() {
    1745      698626 :   for (Page* page : *this) {
    1746             :     page->ResetFreeListStatistics();
    1747             :   }
    1748      206538 : }
    1749             : 
    1750     1305469 : void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
    1751             :   SetTopAndLimit(top, limit);
    1752     2610895 :   if (top != kNullAddress && top != limit &&
    1753             :       heap()->incremental_marking()->black_allocation()) {
    1754      133661 :     Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
    1755             :   }
    1756     1305448 : }
    1757             : 
    1758    22819867 : void PagedSpace::DecreaseLimit(Address new_limit) {
    1759             :   Address old_limit = limit();
    1760             :   DCHECK_LE(top(), new_limit);
    1761             :   DCHECK_GE(old_limit, new_limit);
    1762    22819867 :   if (new_limit != old_limit) {
    1763             :     SetTopAndLimit(top(), new_limit);
    1764       28541 :     Free(new_limit, old_limit - new_limit,
    1765       28541 :          SpaceAccountingMode::kSpaceAccounted);
    1766       28541 :     if (heap()->incremental_marking()->black_allocation()) {
    1767             :       Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
    1768        4814 :                                                                    old_limit);
    1769             :     }
    1770             :   }
    1771    22819867 : }
    1772             : 
    1773    24881809 : Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
    1774             :                                           size_t min_size) {
    1775             :   DCHECK_GE(end - start, min_size);
    1776             : 
    1777    24881809 :   if (heap()->inline_allocation_disabled()) {
    1778             :     // Fit the requested area exactly.
    1779      316979 :     return start + min_size;
    1780    48131882 :   } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
    1781             :     // Generated code may allocate inline from the linear allocation area.
    1782             :     // To make sure we can observe these allocations, we use a lower limit.
    1783    23050909 :     size_t step = GetNextInlineAllocationStepSize();
    1784             : 
    1785             :     // TODO(ofrobots): there is subtle difference between old space and new
    1786             :     // TODO(ofrobots): there is a subtle difference between old space and new
    1787             :     // like to sample the object that straddles the `start + step` boundary.
    1788             :     // Rounding down further would introduce a small statistical error in
    1789             :     // sampling. However, presently PagedSpace requires limit to be aligned.
    1790             :     size_t rounded_step;
    1791    23050909 :     if (identity() == NEW_SPACE) {
    1792             :       DCHECK_GE(step, 1);
    1793      523804 :       rounded_step = step - 1;
    1794             :     } else {
    1795    22527105 :       rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
    1796             :     }
    1797    46101802 :     return Min(static_cast<Address>(start + min_size + rounded_step), end);
    1798             :   } else {
    1799             :     // The entire node can be used as the linear allocation area.
    1800             :     return end;
    1801             :   }
    1802             : }
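
A worked instance of the middle branch above, with assumed numbers: the limit becomes start + min_size + rounded_step unless that would overshoot the free-list node's end:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Hypothetical free-list node [start, end) and observer step size.
      const unsigned long start = 0x10000;
      const unsigned long end = 0x30000;
      const unsigned long min_size = 128;
      const unsigned long rounded_step = 0x8000;
      unsigned long limit = std::min(start + min_size + rounded_step, end);
      std::printf("limit = 0x%lx (end = 0x%lx)\n", limit, end);  // 0x18080
      return 0;
    }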
    1803             : 
    1804       85290 : void PagedSpace::MarkLinearAllocationAreaBlack() {
    1805             :   DCHECK(heap()->incremental_marking()->black_allocation());
    1806             :   Address current_top = top();
    1807             :   Address current_limit = limit();
    1808       85290 :   if (current_top != kNullAddress && current_top != current_limit) {
    1809             :     Page::FromAllocationAreaAddress(current_top)
    1810       25595 :         ->CreateBlackArea(current_top, current_limit);
    1811             :   }
    1812       85290 : }
    1813             : 
    1814        3321 : void PagedSpace::UnmarkLinearAllocationArea() {
    1815             :   Address current_top = top();
    1816             :   Address current_limit = limit();
    1817        3321 :   if (current_top != kNullAddress && current_top != current_limit) {
    1818             :     Page::FromAllocationAreaAddress(current_top)
    1819        1805 :         ->DestroyBlackArea(current_top, current_limit);
    1820             :   }
    1821        3321 : }
    1822             : 
    1823     2677217 : void PagedSpace::FreeLinearAllocationArea() {
    1824             :   // Mark the old linear allocation area with a free space map so it can be
    1825             :   // skipped when scanning the heap.
    1826             :   Address current_top = top();
    1827             :   Address current_limit = limit();
    1828     2677217 :   if (current_top == kNullAddress) {
    1829             :     DCHECK_EQ(kNullAddress, current_limit);
    1830             :     return;
    1831             :   }
    1832             : 
    1833     1184860 :   if (heap()->incremental_marking()->black_allocation()) {
    1834             :     Page* page = Page::FromAllocationAreaAddress(current_top);
    1835             : 
    1836             :     // Clear the bits in the unused black area.
    1837      147097 :     if (current_top != current_limit) {
    1838             :       IncrementalMarking::MarkingState* marking_state =
    1839             :           heap()->incremental_marking()->marking_state();
    1840      110060 :       marking_state->bitmap(page)->ClearRange(
    1841             :           page->AddressToMarkbitIndex(current_top),
    1842      110060 :           page->AddressToMarkbitIndex(current_limit));
    1843      110060 :       marking_state->IncrementLiveBytes(
    1844             :           page, -static_cast<int>(current_limit - current_top));
    1845             :     }
    1846             :   }
    1847             : 
    1848     1184860 :   InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
    1849             :   SetTopAndLimit(kNullAddress, kNullAddress);
    1850             :   DCHECK_GE(current_limit, current_top);
    1851             : 
    1852             :   // The code page of the linear allocation area needs to be unprotected
    1853             :   // because we are going to write a filler into that memory area below.
    1854     1184845 :   if (identity() == CODE_SPACE) {
    1855             :     heap()->UnprotectAndRegisterMemoryChunk(
    1856       86357 :         MemoryChunk::FromAddress(current_top));
    1857             :   }
    1858     1184845 :   Free(current_top, current_limit - current_top,
    1859     1184845 :        SpaceAccountingMode::kSpaceAccounted);
    1860             : }
    1861             : 
    1862       21243 : void PagedSpace::ReleasePage(Page* page) {
    1863             :   DCHECK_EQ(
    1864             :       0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
    1865             :              page));
    1866             :   DCHECK_EQ(page->owner(), this);
    1867             : 
    1868       21243 :   free_list_.EvictFreeListItems(page);
    1869             :   DCHECK(!free_list_.ContainsPageFreeListItems(page));
    1870             : 
    1871       21243 :   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    1872             :     DCHECK(!top_on_previous_step_);
    1873             :     allocation_info_.Reset(kNullAddress, kNullAddress);
    1874             :   }
    1875             : 
    1876             :   AccountUncommitted(page->size());
    1877             :   accounting_stats_.DecreaseCapacity(page->area_size());
    1878       21243 :   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    1879       21243 : }
    1880             : 
    1881       15146 : void PagedSpace::SetReadable() {
    1882             :   DCHECK(identity() == CODE_SPACE);
    1883       30572 :   for (Page* page : *this) {
    1884       15426 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1885       15426 :     page->SetReadable();
    1886             :   }
    1887       15146 : }
    1888             : 
    1889      266686 : void PagedSpace::SetReadAndExecutable() {
    1890             :   DCHECK(identity() == CODE_SPACE);
    1891      687355 :   for (Page* page : *this) {
    1892      420669 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1893      420669 :     page->SetReadAndExecutable();
    1894             :   }
    1895      266691 : }
    1896             : 
    1897      281837 : void PagedSpace::SetReadAndWritable() {
    1898             :   DCHECK(identity() == CODE_SPACE);
    1899      654963 :   for (Page* page : *this) {
    1900      373126 :     CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    1901      373126 :     page->SetReadAndWritable();
    1902             :   }
    1903      281837 : }
    1904             : 
    1905       31500 : std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
    1906       31500 :   return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
    1907             : }
    1908             : 
    1909     1934303 : bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
    1910             :   DCHECK(IsAligned(size_in_bytes, kTaggedSize));
    1911             :   DCHECK_LE(top(), limit());
    1912             : #ifdef DEBUG
    1913             :   if (top() != limit()) {
    1914             :     DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
    1915             :   }
    1916             : #endif
    1917             :   // Don't allocate from the free list if there is linear space available.
    1918             :   DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
    1919             : 
    1920             :   // Mark the old linear allocation area with a free space map so it can be
    1921             :   // skipped when scanning the heap.  This also puts it back in the free list
    1922             :   // if it is big enough.
    1923     1934303 :   FreeLinearAllocationArea();
    1924             : 
    1925     1934252 :   if (!is_local()) {
    1926             :     heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    1927             :         heap()->GCFlagsForIncrementalMarking(),
    1928     1549259 :         kGCCallbackScheduleIdleGarbageCollection);
    1929             :   }
    1930             : 
    1931     1934219 :   size_t new_node_size = 0;
    1932     1934219 :   FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
    1933     1934315 :   if (new_node.is_null()) return false;
    1934             : 
    1935             :   DCHECK_GE(new_node_size, size_in_bytes);
    1936             : 
    1937             :   // The old-space-step might have finished sweeping and restarted marking.
    1938             :   // Verify that it did not turn the page of the new node into an evacuation
    1939             :   // candidate.
    1940             :   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
    1941             : 
    1942             :   // Memory in the linear allocation area is counted as allocated.  We may free
    1943             :   // a little of this again immediately - see below.
    1944             :   Page* page = Page::FromHeapObject(new_node);
    1945     1305528 :   IncreaseAllocatedBytes(new_node_size, page);
    1946             : 
    1947             :   Address start = new_node->address();
    1948     1305528 :   Address end = new_node->address() + new_node_size;
    1949     1305528 :   Address limit = ComputeLimit(start, end, size_in_bytes);
    1950             :   DCHECK_LE(limit, end);
    1951             :   DCHECK_LE(size_in_bytes, limit - start);
    1952     1305515 :   if (limit != end) {
    1953      228227 :     if (identity() == CODE_SPACE) {
    1954        2159 :       heap()->UnprotectAndRegisterMemoryChunk(page);
    1955             :     }
    1956      228227 :     Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
    1957             :   }
    1958     1305470 :   SetLinearAllocationArea(start, limit);
    1959             : 
    1960     1305438 :   return true;
    1961             : }
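                     : 
                     : // A standalone sketch in plain C++ (not part of spaces.cc; all names
                     : // are illustrative) of the trimming arithmetic above: a free-list node
                     : // of node_size bytes starting at |start| becomes the new linear
                     : // allocation area [start, limit), and the unused tail [limit, end) is
                     : // handed back to the free list.
                     : 
                     : #include <cstddef>
                     : #include <cstdint>
                     : 
                     : struct Split {
                     :   uintptr_t lab_start, lab_limit;  // new linear allocation area
                     :   size_t tail_size;                // bytes returned to the free list
                     : };
                     : 
                     : Split SplitFreeListNode(uintptr_t start, size_t node_size,
                     :                         size_t lab_size) {
                     :   // Mirrors the code above; lab_size <= node_size must hold.
                     :   uintptr_t end = start + node_size;
                     :   uintptr_t limit = start + lab_size;  // stands in for ComputeLimit()
                     :   return {start, limit, static_cast<size_t>(end - limit)};
                     : }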
    1962             : 
    1963             : #ifdef DEBUG
    1964             : void PagedSpace::Print() {}
    1965             : #endif
    1966             : 
    1967             : #ifdef VERIFY_HEAP
    1968             : void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
    1969             :   bool allocation_pointer_found_in_space =
    1970             :       (allocation_info_.top() == allocation_info_.limit());
    1971             :   size_t external_space_bytes[kNumTypes];
    1972             :   size_t external_page_bytes[kNumTypes];
    1973             : 
    1974             :   for (int i = 0; i < kNumTypes; i++) {
    1975             :     external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    1976             :   }
    1977             : 
    1978             :   for (Page* page : *this) {
    1979             :     CHECK(page->owner() == this);
    1980             : 
    1981             :     for (int i = 0; i < kNumTypes; i++) {
    1982             :       external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    1983             :     }
    1984             : 
    1985             :     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
    1986             :       allocation_pointer_found_in_space = true;
    1987             :     }
    1988             :     CHECK(page->SweepingDone());
    1989             :     HeapObjectIterator it(page);
    1990             :     Address end_of_previous_object = page->area_start();
    1991             :     Address top = page->area_end();
    1992             : 
    1993             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    1994             :       CHECK(end_of_previous_object <= object->address());
    1995             : 
    1996             :       // The first word should be a map, and we expect all map pointers to
    1997             :       // be in map space or read-only space.
    1998             :       Map map = object->map();
    1999             :       CHECK(map->IsMap());
    2000             :       CHECK(heap()->map_space()->Contains(map) ||
    2001             :             heap()->read_only_space()->Contains(map));
    2002             : 
    2003             :       // Perform space-specific object verification.
    2004             :       VerifyObject(object);
    2005             : 
    2006             :       // The object itself should look OK.
    2007             :       object->ObjectVerify(isolate);
    2008             : 
    2009             :       if (!FLAG_verify_heap_skip_remembered_set) {
    2010             :         heap()->VerifyRememberedSetFor(object);
    2011             :       }
    2012             : 
    2013             :       // All the interior pointers should be contained in the heap.
    2014             :       int size = object->Size();
    2015             :       object->IterateBody(map, size, visitor);
    2016             :       CHECK(object->address() + size <= top);
    2017             :       end_of_previous_object = object->address() + size;
    2018             : 
    2019             :       if (object->IsExternalString()) {
    2020             :         ExternalString external_string = ExternalString::cast(object);
    2021             :         size_t size = external_string->ExternalPayloadSize();
    2022             :         external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
    2023             :       } else if (object->IsJSArrayBuffer()) {
    2024             :         JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
    2025             :         if (ArrayBufferTracker::IsTracked(array_buffer)) {
    2026             :           size_t size = array_buffer->byte_length();
    2027             :           external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
    2028             :         }
    2029             :       }
    2030             :     }
    2031             :     for (int i = 0; i < kNumTypes; i++) {
    2032             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2033             :       CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
    2034             :       external_space_bytes[t] += external_page_bytes[t];
    2035             :     }
    2036             :   }
    2037             :   for (int i = 0; i < kNumTypes; i++) {
    2038             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2039             :     CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
    2040             :   }
    2041             :   CHECK(allocation_pointer_found_in_space);
    2042             : #ifdef DEBUG
    2043             :   VerifyCountersAfterSweeping();
    2044             : #endif
    2045             : }
    2046             : 
    2047             : void PagedSpace::VerifyLiveBytes() {
    2048             :   IncrementalMarking::MarkingState* marking_state =
    2049             :       heap()->incremental_marking()->marking_state();
    2050             :   for (Page* page : *this) {
    2051             :     CHECK(page->SweepingDone());
    2052             :     HeapObjectIterator it(page);
    2053             :     int black_size = 0;
    2054             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    2055             :       // All the interior pointers should be contained in the heap.
    2056             :       // Sum the sizes of all black (marked live) objects.
    2057             :         black_size += object->Size();
    2058             :       }
    2059             :     }
    2060             :     CHECK_LE(black_size, marking_state->live_bytes(page));
    2061             :   }
    2062             : }
    2063             : #endif  // VERIFY_HEAP
    2064             : 
    2065             : #ifdef DEBUG
    2066             : void PagedSpace::VerifyCountersAfterSweeping() {
    2067             :   size_t total_capacity = 0;
    2068             :   size_t total_allocated = 0;
    2069             :   for (Page* page : *this) {
    2070             :     DCHECK(page->SweepingDone());
    2071             :     total_capacity += page->area_size();
    2072             :     HeapObjectIterator it(page);
    2073             :     size_t real_allocated = 0;
    2074             :     for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
    2075             :       if (!object->IsFiller()) {
    2076             :         real_allocated += object->Size();
    2077             :       }
    2078             :     }
    2079             :     total_allocated += page->allocated_bytes();
    2080             :     // The real size can be smaller than the accounted size if array
    2081             :     // trimming or object slack tracking happened after sweeping.
    2082             :     DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    2083             :     DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
    2084             :   }
    2085             :   DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
    2086             :   DCHECK_EQ(total_allocated, accounting_stats_.Size());
    2087             : }
    2088             : 
    2089             : void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
    2090             :   // We need to refine the counters on pages that are already swept and have
    2091             :   // not been moved over to the actual space. Otherwise, the AccountingStats
    2092             :   // are just an overapproximation.
    2093             :   RefillFreeList();
    2094             : 
    2095             :   size_t total_capacity = 0;
    2096             :   size_t total_allocated = 0;
    2097             :   auto marking_state =
    2098             :       heap()->incremental_marking()->non_atomic_marking_state();
    2099             :   for (Page* page : *this) {
    2100             :     size_t page_allocated =
    2101             :         page->SweepingDone()
    2102             :             ? page->allocated_bytes()
    2103             :             : static_cast<size_t>(marking_state->live_bytes(page));
    2104             :     total_capacity += page->area_size();
    2105             :     total_allocated += page_allocated;
    2106             :     DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
    2107             :   }
    2108             :   DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
    2109             :   DCHECK_EQ(total_allocated, accounting_stats_.Size());
    2110             : }
    2111             : #endif
    2112             : 
    2113             : // -----------------------------------------------------------------------------
    2114             : // NewSpace implementation
    2115             : 
    2116       62447 : NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
    2117             :                    size_t initial_semispace_capacity,
    2118             :                    size_t max_semispace_capacity)
    2119             :     : SpaceWithLinearArea(heap, NEW_SPACE),
    2120             :       to_space_(heap, kToSpace),
    2121       62447 :       from_space_(heap, kFromSpace) {
    2122             :   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
    2123             :   DCHECK(
    2124             :       base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
    2125             : 
    2126             :   to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
    2127             :   from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
    2128       62447 :   if (!to_space_.Commit()) {
    2129           0 :     V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
    2130             :   }
    2131             :   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
    2132       62447 :   ResetLinearAllocationArea();
    2133       62447 : }
    2134             : 
    2135       62437 : void NewSpace::TearDown() {
    2136             :   allocation_info_.Reset(kNullAddress, kNullAddress);
    2137             : 
    2138       62437 :   to_space_.TearDown();
    2139       62437 :   from_space_.TearDown();
    2140       62437 : }
    2141             : 
    2142       94944 : void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
    2143             : 
    2144             : 
    2145        2661 : void NewSpace::Grow() {
    2146             :   // Double the semispace size but only up to maximum capacity.
    2147             :   DCHECK(TotalCapacity() < MaximumCapacity());
    2148             :   size_t new_capacity =
    2149        2661 :       Min(MaximumCapacity(),
    2150             :           static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
    2151        2661 :   if (to_space_.GrowTo(new_capacity)) {
    2152             :     // Only grow from space if we managed to grow to-space.
    2153        2661 :     if (!from_space_.GrowTo(new_capacity)) {
    2154             :       // If we managed to grow to-space but couldn't grow from-space,
    2155             :       // attempt to shrink to-space.
    2156           0 :       if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
    2157             :         // We are in an inconsistent state because we could not
    2158             :         // commit/uncommit memory from new space.
    2159           0 :         FATAL("inconsistent state");
    2160             :       }
    2161             :     }
    2162             :   }
    2163             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2164        2661 : }
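                     : 
                     : // Illustrative arithmetic for the growth step above, assuming a growth
                     : // factor of 2 (the real value comes from FLAG_semi_space_growth_factor):
                     : // a 512 KB semispace with a 4 MB maximum grows to
                     : // min(4 MB, 2 * 512 KB) = 1 MB.
                     : 
                     : #include <algorithm>
                     : #include <cstddef>
                     : 
                     : size_t GrownCapacity(size_t total, size_t maximum, size_t factor) {
                     :   return std::min(maximum, factor * total);
                     : }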
    2165             : 
    2166             : 
    2167       17233 : void NewSpace::Shrink() {
    2168       17233 :   size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
    2169             :   size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
    2170       17497 :   if (rounded_new_capacity < TotalCapacity() &&
    2171         264 :       to_space_.ShrinkTo(rounded_new_capacity)) {
    2172             :     // Only shrink from-space if we managed to shrink to-space.
    2173             :     from_space_.Reset();
    2174         264 :     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
    2175             :       // If we managed to shrink to-space but couldn't shrink
    2176             :       // from-space, attempt to grow to-space again.
    2177           0 :       if (!to_space_.GrowTo(from_space_.current_capacity())) {
    2178             :         // We are in an inconsistent state because we could not
    2179             :         // commit/uncommit memory from new space.
    2180           0 :         FATAL("inconsistent state");
    2181             :       }
    2182             :     }
    2183             :   }
    2184             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2185       17233 : }
    2186             : 
    2187       68846 : bool NewSpace::Rebalance() {
    2188             :   // Order here is important to make use of the page pool.
    2189      137692 :   return to_space_.EnsureCurrentCapacity() &&
    2190      137692 :          from_space_.EnsureCurrentCapacity();
    2191             : }
    2192             : 
    2193      137692 : bool SemiSpace::EnsureCurrentCapacity() {
    2194      137692 :   if (is_committed()) {
    2195             :     const int expected_pages =
    2196      137692 :         static_cast<int>(current_capacity_ / Page::kPageSize);
    2197             :     MemoryChunk* current_page = first_page();
    2198             :     int actual_pages = 0;
    2199             : 
    2200             :     // First walk the page list up to expected_pages, if that many
    2201             :     // pages exist.
    2202     1724610 :     while (current_page != nullptr && actual_pages < expected_pages) {
    2203      793459 :       actual_pages++;
    2204             :       current_page = current_page->list_node().next();
    2205             :     }
    2206             : 
    2207             :     // Free all overallocated pages which are behind current_page.
    2208        2707 :     while (current_page) {
    2209             :       MemoryChunk* next_current = current_page->list_node().next();
    2210             :       memory_chunk_list_.Remove(current_page);
    2211             :       // Clear new space flags to avoid this page being treated as a new
    2212             :       // space page that is potentially being swept.
    2213             :       current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
    2214             :       heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
    2215        2707 :           current_page);
    2216             :       current_page = next_current;
    2217             :     }
    2218             : 
    2219             :     // Add more pages if we have less than expected_pages.
    2220             :     IncrementalMarking::NonAtomicMarkingState* marking_state =
    2221             :         heap()->incremental_marking()->non_atomic_marking_state();
    2222        4193 :     while (actual_pages < expected_pages) {
    2223        4193 :       actual_pages++;
    2224             :       current_page =
    2225             :           heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2226             :               MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2227        4193 :               NOT_EXECUTABLE);
    2228        4193 :       if (current_page == nullptr) return false;
    2229             :       DCHECK_NOT_NULL(current_page);
    2230             :       memory_chunk_list_.PushBack(current_page);
    2231             :       marking_state->ClearLiveness(current_page);
    2232             :       current_page->SetFlags(first_page()->GetFlags(),
    2233             :                              static_cast<uintptr_t>(Page::kCopyAllFlags));
    2234             :       heap()->CreateFillerObjectAt(current_page->area_start(),
    2235             :                                    static_cast<int>(current_page->area_size()),
    2236        4193 :                                    ClearRecordedSlots::kNo);
    2237             :     }
    2238             :   }
    2239             :   return true;
    2240             : }
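                     : 
                     : // A standalone sketch of the rebalancing loop above, with std::list<int>
                     : // standing in for the intrusive page list: keep the first expected_pages
                     : // entries, drop any surplus, then append until the target is reached
                     : // (in the real code each appended page comes from AllocatePage, which
                     : // can fail).
                     : 
                     : #include <list>
                     : 
                     : bool EnsureCapacity(std::list<int>* pages, int expected_pages) {
                     :   while (static_cast<int>(pages->size()) > expected_pages) {
                     :     pages->pop_back();  // free overallocated pages
                     :   }
                     :   while (static_cast<int>(pages->size()) < expected_pages) {
                     :     pages->push_back(0);  // stands in for a freshly allocated page
                     :   }
                     :   return true;
                     : }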
    2241             : 
    2242     1130951 : LinearAllocationArea LocalAllocationBuffer::Close() {
    2243     1130951 :   if (IsValid()) {
    2244      109251 :     heap_->CreateFillerObjectAt(
    2245             :         allocation_info_.top(),
    2246             :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    2247      218502 :         ClearRecordedSlots::kNo);
    2248      109255 :     const LinearAllocationArea old_info = allocation_info_;
    2249      109255 :     allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    2250      109255 :     return old_info;
    2251             :   }
    2252     1021700 :   return LinearAllocationArea(kNullAddress, kNullAddress);
    2253             : }
    2254             : 
    2255      406438 : LocalAllocationBuffer::LocalAllocationBuffer(
    2256             :     Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
    2257             :     : heap_(heap),
    2258      406438 :       allocation_info_(allocation_info) {
    2259      406438 :   if (IsValid()) {
    2260             :     heap_->CreateFillerObjectAt(
    2261             :         allocation_info_.top(),
    2262             :         static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
    2263      198724 :         ClearRecordedSlots::kNo);
    2264             :   }
    2265      406424 : }
    2266             : 
    2267      198936 : LocalAllocationBuffer::LocalAllocationBuffer(const LocalAllocationBuffer& other)
    2268             :     V8_NOEXCEPT {
    2269             :   *this = other;
    2270      198944 : }
    2271             : 
    2272      199281 : LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    2273             :     const LocalAllocationBuffer& other) V8_NOEXCEPT {
    2274      398217 :   Close();
    2275      398222 :   heap_ = other.heap_;
    2276      398222 :   allocation_info_ = other.allocation_info_;
    2277             : 
    2278             :   // This is needed since we (a) cannot yet use move semantics, (b) want
    2279             :   // to make it easy to use the class by value, and (c) implicitly call
    2280             :   // {Close} upon copy.
    2281             :   const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
    2282             :       kNullAddress, kNullAddress);
    2283      199278 :   return *this;
    2284             : }
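                     : 
                     : // Hypothetical usage illustrating the copy semantics above: assignment
                     : // closes the destination's old buffer (a filler is written over its
                     : // unused area) and transfers the source's allocation area, leaving the
                     : // source invalid.
                     : //
                     : //   LocalAllocationBuffer a = ...;  // owns area A
                     : //   LocalAllocationBuffer b = ...;  // owns area B
                     : //   b = a;  // B is closed; b now owns A; a.IsValid() becomes false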
    2285             : 
    2286      311051 : void NewSpace::UpdateLinearAllocationArea() {
    2287             :   // Make sure there are no unaccounted allocations.
    2288             :   DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
    2289             : 
    2290             :   Address new_top = to_space_.page_low();
    2291      311051 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2292             :   allocation_info_.Reset(new_top, to_space_.page_high());
    2293             :   // The order of the following two stores is important.
    2294             :   // See the corresponding loads in ConcurrentMarking::Run.
    2295             :   original_limit_.store(limit(), std::memory_order_relaxed);
    2296             :   original_top_.store(top(), std::memory_order_release);
    2297      311051 :   StartNextInlineAllocationStep();
    2298             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2299      311051 : }
    2300             : 
    2301      157391 : void NewSpace::ResetLinearAllocationArea() {
    2302             :   // Do a step to account for memory allocated so far before resetting.
    2303      157391 :   InlineAllocationStep(top(), top(), kNullAddress, 0);
    2304             :   to_space_.Reset();
    2305      157391 :   UpdateLinearAllocationArea();
    2306             :   // Clear all mark-bits in the to-space.
    2307             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2308             :       heap()->incremental_marking()->non_atomic_marking_state();
    2309     1042626 :   for (Page* p : to_space_) {
    2310             :     marking_state->ClearLiveness(p);
    2311             :     // Concurrent marking may have local live bytes for this page.
    2312      885235 :     heap()->concurrent_marking()->ClearMemoryChunkData(p);
    2313             :   }
    2314      157391 : }
    2315             : 
    2316      756394 : void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
    2317      756394 :   Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
    2318             :   allocation_info_.set_limit(new_limit);
    2319             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2320      756394 : }
    2321             : 
    2322    22819899 : void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
    2323    22819899 :   Address new_limit = ComputeLimit(top(), limit(), min_size);
    2324             :   DCHECK_LE(new_limit, limit());
    2325    22819872 :   DecreaseLimit(new_limit);
    2326    22819862 : }
    2327             : 
    2328      173183 : bool NewSpace::AddFreshPage() {
    2329             :   Address top = allocation_info_.top();
    2330             :   DCHECK(!OldSpace::IsAtPageStart(top));
    2331             : 
    2332             :   // Do a step to account for memory allocated on previous page.
    2333      173183 :   InlineAllocationStep(top, top, kNullAddress, 0);
    2334             : 
    2335      173183 :   if (!to_space_.AdvancePage()) {
    2336             :     // No more pages left to advance.
    2337             :     return false;
    2338             :   }
    2339             : 
    2340             :   // Clear remainder of current page.
    2341             :   Address limit = Page::FromAllocationAreaAddress(top)->area_end();
    2342      153660 :   int remaining_in_page = static_cast<int>(limit - top);
    2343      153660 :   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
    2344      153660 :   UpdateLinearAllocationArea();
    2345             : 
    2346      153660 :   return true;
    2347             : }
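                     : 
                     : // Illustrative arithmetic for the filler above: the untouched remainder
                     : // of the old page, [top, area_end), becomes one filler object so heap
                     : // iteration still sees a contiguous run of objects.
                     : 
                     : #include <cstdint>
                     : 
                     : int RemainderInPage(uintptr_t top, uintptr_t area_end) {
                     :   return static_cast<int>(area_end - top);
                     : }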
    2348             : 
    2349             : 
    2350           0 : bool NewSpace::AddFreshPageSynchronized() {
    2351           0 :   base::MutexGuard guard(&mutex_);
    2352           0 :   return AddFreshPage();
    2353             : }
    2354             : 
    2355             : 
    2356      459343 : bool NewSpace::EnsureAllocation(int size_in_bytes,
    2357             :                                 AllocationAlignment alignment) {
    2358             :   Address old_top = allocation_info_.top();
    2359             :   Address high = to_space_.page_high();
    2360      459343 :   int filler_size = Heap::GetFillToAlign(old_top, alignment);
    2361      459343 :   int aligned_size_in_bytes = size_in_bytes + filler_size;
    2362             : 
    2363      459343 :   if (old_top + aligned_size_in_bytes > high) {
    2364             :     // Not enough room in the page, try to allocate a new one.
    2365      172205 :     if (!AddFreshPage()) {
    2366             :       return false;
    2367             :     }
    2368             : 
    2369             :     old_top = allocation_info_.top();
    2370             :     high = to_space_.page_high();
    2371      152739 :     filler_size = Heap::GetFillToAlign(old_top, alignment);
    2372             :   }
    2373             : 
    2374             :   DCHECK(old_top + aligned_size_in_bytes <= high);
    2375             : 
    2376      439877 :   if (allocation_info_.limit() < high) {
    2377             :     // The limit has been lowered either because linear allocation was
    2378             :     // disabled, because incremental marking wants a chance to do a step,
    2379             :     // or because the idle scavenge job wants a chance to post a task.
    2380             :     // Set the new limit accordingly.
    2381      322742 :     Address new_top = old_top + aligned_size_in_bytes;
    2382      322742 :     Address soon_object = old_top + filler_size;
    2383      322742 :     InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
    2384      322742 :     UpdateInlineAllocationLimit(aligned_size_in_bytes);
    2385             :   }
    2386             :   return true;
    2387             : }
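                     : 
                     : // A standalone sketch of the alignment step above. Heap::GetFillToAlign
                     : // takes an AllocationAlignment enum; a raw power-of-two byte count is
                     : // used here instead. The fill is however many bytes are needed to bring
                     : // top up to the requested alignment, e.g. FillToAlign(0x1004, 8) == 4.
                     : 
                     : #include <cstdint>
                     : 
                     : int FillToAlign(uintptr_t top, uintptr_t alignment) {
                     :   uintptr_t misalignment = top & (alignment - 1);
                     :   return misalignment == 0 ? 0
                     :                            : static_cast<int>(alignment - misalignment);
                     : }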
    2388             : 
    2389       95564 : size_t LargeObjectSpace::Available() {
    2390             :   // We return zero here since we cannot take advantage of already allocated
    2391             :   // large object memory.
    2392       95564 :   return 0;
    2393             : }
    2394             : 
    2395   126427476 : void SpaceWithLinearArea::StartNextInlineAllocationStep() {
    2396   126427476 :   if (heap()->allocation_step_in_progress()) {
    2397             :     // If we are mid-way through an existing step, don't start a new one.
    2398             :     return;
    2399             :   }
    2400             : 
    2401   126427507 :   if (AllocationObserversActive()) {
    2402    22752219 :     top_on_previous_step_ = top();
    2403    22752219 :     UpdateInlineAllocationLimit(0);
    2404             :   } else {
    2405             :     DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2406             :   }
    2407             : }
    2408             : 
    2409      215431 : void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
    2410      215431 :   InlineAllocationStep(top(), top(), kNullAddress, 0);
    2411      215431 :   Space::AddAllocationObserver(observer);
    2412             :   DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
    2413      215431 : }
    2414             : 
    2415      188803 : void SpaceWithLinearArea::RemoveAllocationObserver(
    2416             :     AllocationObserver* observer) {
    2417             :   Address top_for_next_step =
    2418      188803 :       allocation_observers_.size() == 1 ? kNullAddress : top();
    2419      188803 :   InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
    2420      188805 :   Space::RemoveAllocationObserver(observer);
    2421             :   DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
    2422      188805 : }
    2423             : 
    2424      484920 : void SpaceWithLinearArea::PauseAllocationObservers() {
    2425             :   // Do a step to account for memory allocated so far.
    2426      484920 :   InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
    2427             :   Space::PauseAllocationObservers();
    2428             :   DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2429      484920 :   UpdateInlineAllocationLimit(0);
    2430      484920 : }
    2431             : 
    2432      484920 : void SpaceWithLinearArea::ResumeAllocationObservers() {
    2433             :   DCHECK_EQ(kNullAddress, top_on_previous_step_);
    2434             :   Space::ResumeAllocationObservers();
    2435      484920 :   StartNextInlineAllocationStep();
    2436      484920 : }
    2437             : 
    2438     2727327 : void SpaceWithLinearArea::InlineAllocationStep(Address top,
    2439             :                                                Address top_for_next_step,
    2440             :                                                Address soon_object,
    2441             :                                                size_t size) {
    2442     2727327 :   if (heap()->allocation_step_in_progress()) {
    2443             :     // Avoid starting a new step if we are mid-way through an existing one.
    2444             :     return;
    2445             :   }
    2446             : 
    2447     2727329 :   if (top_on_previous_step_) {
    2448      874119 :     if (top < top_on_previous_step_) {
    2449             :       // Generated code decreased the top pointer to do folded allocations.
    2450             :       DCHECK_NE(top, kNullAddress);
    2451             :       DCHECK_EQ(Page::FromAllocationAreaAddress(top),
    2452             :                 Page::FromAllocationAreaAddress(top_on_previous_step_));
    2453           0 :       top_on_previous_step_ = top;
    2454             :     }
    2455      874119 :     int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
    2456      874119 :     AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
    2457      874120 :     top_on_previous_step_ = top_for_next_step;
    2458             :   }
    2459             : }
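                     : 
                     : // A minimal sketch of the accounting above: the byte count reported to
                     : // allocation observers is simply how far the top pointer has advanced
                     : // since the previous step.
                     : 
                     : #include <cstdint>
                     : 
                     : int BytesSinceLastStep(uintptr_t top, uintptr_t top_on_previous_step) {
                     :   return static_cast<int>(top - top_on_previous_step);
                     : }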
    2460             : 
    2461        7875 : std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
    2462        7875 :   return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
    2463             : }
    2464             : 
    2465             : #ifdef VERIFY_HEAP
    2466             : // We do not use the SemiSpaceIterator because verification doesn't assume
    2467             : // that it works (it depends on the invariants we are checking).
    2468             : void NewSpace::Verify(Isolate* isolate) {
    2469             :   // The allocation pointer should be in the space or at the very end.
    2470             :   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    2471             : 
    2472             :   // There should be objects packed in from the low address up to the
    2473             :   // allocation pointer.
    2474             :   Address current = to_space_.first_page()->area_start();
    2475             :   CHECK_EQ(current, to_space_.space_start());
    2476             : 
    2477             :   size_t external_space_bytes[kNumTypes];
    2478             :   for (int i = 0; i < kNumTypes; i++) {
    2479             :     external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    2480             :   }
    2481             : 
    2482             :   while (current != top()) {
    2483             :     if (!Page::IsAlignedToPageSize(current)) {
    2484             :       // The allocation pointer should not be in the middle of an object.
    2485             :       CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
    2486             :             current < top());
    2487             : 
    2488             :       HeapObject object = HeapObject::FromAddress(current);
    2489             : 
    2490             :       // The first word should be a map, and we expect all map pointers to
    2491             :       // be in map space or read-only space.
    2492             :       Map map = object->map();
    2493             :       CHECK(map->IsMap());
    2494             :       CHECK(heap()->map_space()->Contains(map) ||
    2495             :             heap()->read_only_space()->Contains(map));
    2496             : 
    2497             :       // The object should not be code or a map.
    2498             :       CHECK(!object->IsMap());
    2499             :       CHECK(!object->IsAbstractCode());
    2500             : 
    2501             :       // The object itself should look OK.
    2502             :       object->ObjectVerify(isolate);
    2503             : 
    2504             :       // All the interior pointers should be contained in the heap.
    2505             :       VerifyPointersVisitor visitor(heap());
    2506             :       int size = object->Size();
    2507             :       object->IterateBody(map, size, &visitor);
    2508             : 
    2509             :       if (object->IsExternalString()) {
    2510             :         ExternalString external_string = ExternalString::cast(object);
    2511             :         size_t size = external_string->ExternalPayloadSize();
    2512             :         external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
    2513             :       } else if (object->IsJSArrayBuffer()) {
    2514             :         JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
    2515             :         if (ArrayBufferTracker::IsTracked(array_buffer)) {
    2516             :           size_t size = array_buffer->byte_length();
    2517             :           external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
    2518             :         }
    2519             :       }
    2520             : 
    2521             :       current += size;
    2522             :     } else {
    2523             :       // At end of page, switch to next page.
    2524             :       Page* page = Page::FromAllocationAreaAddress(current)->next_page();
    2525             :       current = page->area_start();
    2526             :     }
    2527             :   }
    2528             : 
    2529             :   for (int i = 0; i < kNumTypes; i++) {
    2530             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2531             :     CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
    2532             :   }
    2533             : 
    2534             :   // Check semi-spaces.
    2535             :   CHECK_EQ(from_space_.id(), kFromSpace);
    2536             :   CHECK_EQ(to_space_.id(), kToSpace);
    2537             :   from_space_.Verify();
    2538             :   to_space_.Verify();
    2539             : }
    2540             : #endif
    2541             : 
    2542             : // -----------------------------------------------------------------------------
    2543             : // SemiSpace implementation
    2544             : 
    2545           0 : void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
    2546             :   DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
    2547      124894 :   minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
    2548      124894 :   current_capacity_ = minimum_capacity_;
    2549      124894 :   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
    2550       62447 :   committed_ = false;
    2551           0 : }
    2552             : 
    2553             : 
    2554           0 : void SemiSpace::TearDown() {
    2555             :   // Properly uncommit memory to keep the allocator counters in sync.
    2556      124874 :   if (is_committed()) {
    2557       77089 :     Uncommit();
    2558             :   }
    2559      124874 :   current_capacity_ = maximum_capacity_ = 0;
    2560           0 : }
    2561             : 
    2562             : 
    2563       93044 : bool SemiSpace::Commit() {
    2564             :   DCHECK(!is_committed());
    2565       93044 :   const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
    2566      841956 :   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    2567             :     Page* new_page =
    2568             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2569             :             MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2570      374456 :             NOT_EXECUTABLE);
    2571      374456 :     if (new_page == nullptr) {
    2572           0 :       if (pages_added) RewindPages(pages_added);
    2573             :       return false;
    2574             :     }
    2575             :     memory_chunk_list_.PushBack(new_page);
    2576             :   }
    2577             :   Reset();
    2578       93044 :   AccountCommitted(current_capacity_);
    2579       93044 :   if (age_mark_ == kNullAddress) {
    2580       79352 :     age_mark_ = first_page()->area_start();
    2581             :   }
    2582       93044 :   committed_ = true;
    2583       93044 :   return true;
    2584             : }
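                     : 
                     : // Illustrative arithmetic only: if Page::kPageSize were 512 KB (an
                     : // assumption for this example, not a quote of the real constant), a
                     : // 2 MB semispace would commit 2 MB / 512 KB = 4 pooled pages.
                     : 
                     : #include <cstddef>
                     : 
                     : size_t PagesForCapacity(size_t capacity, size_t page_size) {
                     :   return capacity / page_size;  // capacity is page-aligned in SemiSpace
                     : }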
    2585             : 
    2586             : 
    2587       93029 : bool SemiSpace::Uncommit() {
    2588             :   DCHECK(is_committed());
    2589      926767 :   while (!memory_chunk_list_.Empty()) {
    2590             :     MemoryChunk* chunk = memory_chunk_list_.front();
    2591             :     memory_chunk_list_.Remove(chunk);
    2592      416869 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
    2593             :   }
    2594       93029 :   current_page_ = nullptr;
    2595       93029 :   AccountUncommitted(current_capacity_);
    2596       93029 :   committed_ = false;
    2597       93029 :   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2598       93029 :   return true;
    2599             : }
    2600             : 
    2601             : 
    2602           0 : size_t SemiSpace::CommittedPhysicalMemory() {
    2603         342 :   if (!is_committed()) return 0;
    2604             :   size_t size = 0;
    2605        1710 :   for (Page* p : *this) {
    2606        1368 :     size += p->CommittedPhysicalMemory();
    2607             :   }
    2608             :   return size;
    2609             : }
    2610             : 
    2611        5322 : bool SemiSpace::GrowTo(size_t new_capacity) {
    2612        5322 :   if (!is_committed()) {
    2613         227 :     if (!Commit()) return false;
    2614             :   }
    2615             :   DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
    2616             :   DCHECK_LE(new_capacity, maximum_capacity_);
    2617             :   DCHECK_GT(new_capacity, current_capacity_);
    2618        5322 :   const size_t delta = new_capacity - current_capacity_;
    2619             :   DCHECK(IsAligned(delta, AllocatePageSize()));
    2620        5322 :   const int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2621             :   DCHECK(last_page());
    2622             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    2623             :       heap()->incremental_marking()->non_atomic_marking_state();
    2624       97586 :   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    2625             :     Page* new_page =
    2626             :         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
    2627             :             MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
    2628       46132 :             NOT_EXECUTABLE);
    2629       46132 :     if (new_page == nullptr) {
    2630           0 :       if (pages_added) RewindPages(pages_added);
    2631             :       return false;
    2632             :     }
    2633             :     memory_chunk_list_.PushBack(new_page);
    2634             :     marking_state->ClearLiveness(new_page);
    2635             :     // Duplicate the flags that were set on the old page.
    2636             :     new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
    2637             :   }
    2638             :   AccountCommitted(delta);
    2639        5322 :   current_capacity_ = new_capacity;
    2640        5322 :   return true;
    2641             : }
    2642             : 
    2643         528 : void SemiSpace::RewindPages(int num_pages) {
    2644             :   DCHECK_GT(num_pages, 0);
    2645             :   DCHECK(last_page());
    2646        7848 :   while (num_pages > 0) {
    2647             :     MemoryChunk* last = last_page();
    2648             :     memory_chunk_list_.Remove(last);
    2649        3660 :     heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
    2650        3660 :     num_pages--;
    2651             :   }
    2652         528 : }
    2653             : 
    2654         528 : bool SemiSpace::ShrinkTo(size_t new_capacity) {
    2655             :   DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
    2656             :   DCHECK_GE(new_capacity, minimum_capacity_);
    2657             :   DCHECK_LT(new_capacity, current_capacity_);
    2658         528 :   if (is_committed()) {
    2659         528 :     const size_t delta = current_capacity_ - new_capacity;
    2660             :     DCHECK(IsAligned(delta, Page::kPageSize));
    2661         528 :     int delta_pages = static_cast<int>(delta / Page::kPageSize);
    2662         528 :     RewindPages(delta_pages);
    2663             :     AccountUncommitted(delta);
    2664         528 :     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    2665             :   }
    2666         528 :   current_capacity_ = new_capacity;
    2667         528 :   return true;
    2668             : }
    2669             : 
    2670      189888 : void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
    2671     1458952 :   for (Page* page : *this) {
    2672     1269064 :     page->set_owner(this);
    2673     1269064 :     page->SetFlags(flags, mask);
    2674     1269064 :     if (id_ == kToSpace) {
    2675             :       page->ClearFlag(MemoryChunk::FROM_PAGE);
    2676             :       page->SetFlag(MemoryChunk::TO_PAGE);
    2677             :       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2678             :       heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
    2679             :           page, 0);
    2680             :     } else {
    2681             :       page->SetFlag(MemoryChunk::FROM_PAGE);
    2682             :       page->ClearFlag(MemoryChunk::TO_PAGE);
    2683             :     }
    2684             :     DCHECK(page->InYoungGeneration());
    2685             :   }
    2686      189888 : }
    2687             : 
    2688             : 
    2689           0 : void SemiSpace::Reset() {
    2690             :   DCHECK(first_page());
    2691             :   DCHECK(last_page());
    2692      250699 :   current_page_ = first_page();
    2693      250699 :   pages_used_ = 0;
    2694           0 : }
    2695             : 
    2696        4193 : void SemiSpace::RemovePage(Page* page) {
    2697        4193 :   if (current_page_ == page) {
    2698         295 :     if (page->prev_page()) {
    2699         290 :       current_page_ = page->prev_page();
    2700             :     }
    2701             :   }
    2702             :   memory_chunk_list_.Remove(page);
    2703       20965 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    2704        8386 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2705        8386 :     DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    2706             :   }
    2707        4193 : }
    2708             : 
    2709        2707 : void SemiSpace::PrependPage(Page* page) {
    2710             :   page->SetFlags(current_page()->GetFlags(),
    2711             :                  static_cast<uintptr_t>(Page::kCopyAllFlags));
    2712        2707 :   page->set_owner(this);
    2713             :   memory_chunk_list_.PushFront(page);
    2714        2707 :   pages_used_++;
    2715       13535 :   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    2716        5414 :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2717        5414 :     IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
    2718             :   }
    2719        2707 : }
    2720             : 
    2721       94944 : void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
    2722             :   // We won't be swapping semispaces without data in them.
    2723             :   DCHECK(from->first_page());
    2724             :   DCHECK(to->first_page());
    2725             : 
    2726       94944 :   intptr_t saved_to_space_flags = to->current_page()->GetFlags();
    2727             : 
    2728             :   // We swap all properties but id_.
    2729             :   std::swap(from->current_capacity_, to->current_capacity_);
    2730             :   std::swap(from->maximum_capacity_, to->maximum_capacity_);
    2731             :   std::swap(from->minimum_capacity_, to->minimum_capacity_);
    2732             :   std::swap(from->age_mark_, to->age_mark_);
    2733             :   std::swap(from->committed_, to->committed_);
    2734             :   std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
    2735             :   std::swap(from->current_page_, to->current_page_);
    2736             :   std::swap(from->external_backing_store_bytes_,
    2737             :             to->external_backing_store_bytes_);
    2738             : 
    2739       94944 :   to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
    2740       94944 :   from->FixPagesFlags(0, 0);
    2741       94944 : }
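                     : 
                     : // A toy illustration of the "swap everything but id_" pattern above:
                     : // each field pair is exchanged with std::swap while the ids keep
                     : // identifying their original spaces.
                     : 
                     : #include <cstddef>
                     : #include <utility>
                     : 
                     : struct ToySpace { int id; size_t capacity; bool committed; };
                     : 
                     : void SwapAllButId(ToySpace* a, ToySpace* b) {
                     :   std::swap(a->capacity, b->capacity);
                     :   std::swap(a->committed, b->committed);
                     : }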
    2742             : 
    2743       94944 : void SemiSpace::set_age_mark(Address mark) {
    2744             :   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
    2745       94944 :   age_mark_ = mark;
    2746             :   // Mark all pages up to the one containing mark.
    2747      221434 :   for (Page* p : PageRange(space_start(), mark)) {
    2748             :     p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    2749             :   }
    2750       94944 : }
    2751             : 
    2752           0 : std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
    2753             :   // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
    2754           0 :   UNREACHABLE();
    2755             : }
    2756             : 
    2757             : #ifdef DEBUG
    2758             : void SemiSpace::Print() {}
    2759             : #endif
    2760             : 
    2761             : #ifdef VERIFY_HEAP
    2762             : void SemiSpace::Verify() {
    2763             :   bool is_from_space = (id_ == kFromSpace);
    2764             :   size_t external_backing_store_bytes[kNumTypes];
    2765             : 
    2766             :   for (int i = 0; i < kNumTypes; i++) {
    2767             :     external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    2768             :   }
    2769             : 
    2770             :   for (Page* page : *this) {
    2771             :     CHECK_EQ(page->owner(), this);
    2772             :     CHECK(page->InNewSpace());
    2773             :     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
    2774             :                                         : MemoryChunk::TO_PAGE));
    2775             :     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
    2776             :                                          : MemoryChunk::FROM_PAGE));
    2777             :     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    2778             :     if (!is_from_space) {
    2779             :       // The pointers-from-here-are-interesting flag isn't updated dynamically
    2780             :       // on from-space pages, so it might be out of sync with the marking state.
    2781             :       if (page->heap()->incremental_marking()->IsMarking()) {
    2782             :         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2783             :       } else {
    2784             :         CHECK(
    2785             :             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
    2786             :       }
    2787             :     }
    2788             :     for (int i = 0; i < kNumTypes; i++) {
    2789             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2790             :       external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
    2791             :     }
    2792             : 
    2793             :     CHECK_IMPLIES(page->list_node().prev(),
    2794             :                   page->list_node().prev()->list_node().next() == page);
    2795             :   }
    2796             :   for (int i = 0; i < kNumTypes; i++) {
    2797             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    2798             :     CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
    2799             :   }
    2800             : }
    2801             : #endif
    2802             : 
    2803             : #ifdef DEBUG
    2804             : void SemiSpace::AssertValidRange(Address start, Address end) {
    2805             :   // Addresses must belong to the same semi-space.
    2806             :   Page* page = Page::FromAllocationAreaAddress(start);
    2807             :   Page* end_page = Page::FromAllocationAreaAddress(end);
    2808             :   SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
    2809             :   DCHECK_EQ(space, end_page->owner());
    2810             :   // The start address must not come after the end address: either both
    2811             :   // are on the same page, or the end address is on a later page in the
    2812             :   // linked list of semi-space pages.
    2813             :   if (page == end_page) {
    2814             :     DCHECK_LE(start, end);
    2815             :   } else {
    2816             :     while (page != end_page) {
    2817             :       page = page->next_page();
    2818             :     }
    2819             :     DCHECK(page);
    2820             :   }
    2821             : }
    2822             : #endif
    2823             : 
    2824             : 
    2825             : // -----------------------------------------------------------------------------
    2826             : // SemiSpaceIterator implementation.
    2827             : 
    2828        7875 : SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
    2829             :   Initialize(space->first_allocatable_address(), space->top());
    2830           0 : }
    2831             : 
    2832             : 
    2833           0 : void SemiSpaceIterator::Initialize(Address start, Address end) {
    2834             :   SemiSpace::AssertValidRange(start, end);
    2835        7875 :   current_ = start;
    2836        7875 :   limit_ = end;
    2837           0 : }
    2838             : 
    2839         251 : size_t NewSpace::CommittedPhysicalMemory() {
    2840         251 :   if (!base::OS::HasLazyCommits()) return CommittedMemory();
    2841         251 :   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2842             :   size_t size = to_space_.CommittedPhysicalMemory();
    2843         251 :   if (from_space_.is_committed()) {
    2844          91 :     size += from_space_.CommittedPhysicalMemory();
    2845             :   }
    2846             :   return size;
    2847             : }
    2848             : 
    2849             : 
    2850             : // -----------------------------------------------------------------------------
    2851             : // Free lists for old object spaces implementation
    2852             : 
    2853             : 
    2854           0 : void FreeListCategory::Reset() {
    2855             :   set_top(FreeSpace());
    2856             :   set_prev(nullptr);
    2857             :   set_next(nullptr);
    2858     2322457 :   available_ = 0;
    2859           0 : }
    2860             : 
    2861      757172 : FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
    2862             :                                              size_t* node_size) {
    2863             :   DCHECK(page()->CanAllocate());
    2864             :   FreeSpace node = top();
    2865     1416829 :   if (node.is_null() || static_cast<size_t>(node->Size()) < minimum_size) {
    2866      141904 :     *node_size = 0;
    2867      141904 :     return FreeSpace();
    2868             :   }
    2869             :   set_top(node->next());
    2870      615268 :   *node_size = node->Size();
    2871      615268 :   available_ -= *node_size;
    2872      615268 :   return node;
    2873             : }
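                     : 
                     : // A toy singly linked free list in plain C++ mirroring the pick above:
                     : // take the head node if it is large enough; otherwise fail immediately
                     : // rather than searching further down the list.
                     : 
                     : #include <cstddef>
                     : 
                     : struct ToyNode { size_t size; ToyNode* next; };
                     : 
                     : ToyNode* PickHead(ToyNode** top, size_t minimum_size, size_t* node_size) {
                     :   ToyNode* node = *top;
                     :   if (node == nullptr || node->size < minimum_size) {
                     :     *node_size = 0;
                     :     return nullptr;
                     :   }
                     :   *top = node->next;
                     :   *node_size = node->size;
                     :   return node;
                     : }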
    2874             : 
    2875      746376 : FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
    2876             :                                                 size_t* node_size) {
    2877             :   DCHECK(page()->CanAllocate());
    2878             :   FreeSpace prev_non_evac_node;
    2879      746814 :   for (FreeSpace cur_node = top(); !cur_node.is_null();
    2880             :        cur_node = cur_node->next()) {
    2881      690524 :     size_t size = cur_node->size();
    2882      690524 :     if (size >= minimum_size) {
    2883             :       DCHECK_GE(available_, size);
    2884      690305 :       available_ -= size;
    2885      690305 :       if (cur_node == top()) {
    2886             :         set_top(cur_node->next());
    2887             :       }
    2888      690305 :       if (!prev_non_evac_node.is_null()) {
    2889             :         MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
    2890           6 :         if (chunk->owner()->identity() == CODE_SPACE) {
    2891           0 :           chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
    2892             :         }
    2893             :         prev_non_evac_node->set_next(cur_node->next());
    2894             :       }
    2895      690305 :       *node_size = size;
    2896      690305 :       return cur_node;
    2897             :     }
    2898             : 
    2899             :     prev_non_evac_node = cur_node;
    2900             :   }
    2901       56071 :   return FreeSpace();
    2902             : }
    2903             : 
    2904    17790059 : void FreeListCategory::Free(Address start, size_t size_in_bytes,
    2905             :                             FreeMode mode) {
    2906             :   FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
    2907             :   free_space->set_next(top());
    2908             :   set_top(free_space);
    2909    17790059 :   available_ += size_in_bytes;
    2910    17790059 :   if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
    2911             :     owner()->AddCategory(this);
    2912             :   }
    2913    17790059 : }
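
// Note how Free() above reuses the freed block itself as the list node:
// FreeSpace::cast(HeapObject::FromAddress(start)) writes the node header into
// the first words of the freed range. A hedged sketch of that intrusive
// trick, again on the illustrative FreeNode type:
#include <new>

void FreeBlock(FreeNode** top, void* start, size_t size_in_bytes) {
  // Construct the node header in place, inside the freed memory itself,
  // then push it onto the head of the list.
  FreeNode* node = new (start) FreeNode{size_in_bytes, *top};
  *top = node;
}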
    2914             : 
    2915             : 
    2916       62386 : void FreeListCategory::RepairFreeList(Heap* heap) {
    2917             :   FreeSpace n = top();
    2918       62386 :   while (!n.is_null()) {
    2919             :     MapWordSlot map_location = n.map_slot();
    2920             :     // We can't use .is_null() here because *map_location returns an
    2921             :     // Object (for which "is null" is not defined, as it would be
    2922             :     // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
    2923           0 :     if (map_location.contains_value(kNullAddress)) {
    2924             :       map_location.store(ReadOnlyRoots(heap).free_space_map());
    2925             :     } else {
    2926             :       DCHECK(map_location.contains_value(
    2927             :           ReadOnlyRoots(heap).free_space_map().ptr()));
    2928             :     }
    2929             :     n = n->next();
    2930             :   }
    2931       62386 : }
    2932             : 
    2933           0 : void FreeListCategory::Relink() {
    2934             :   DCHECK(!is_linked());
    2935             :   owner()->AddCategory(this);
    2936           0 : }
    2937             : 
    2938           0 : FreeList::FreeList() : wasted_bytes_(0) {
    2939     6561165 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2940     3028230 :     categories_[i] = nullptr;
    2941             :   }
    2942      504705 :   Reset();
    2943           0 : }
    2944             : 
    2945             : 
    2946      711243 : void FreeList::Reset() {
    2947             :   ForAllFreeListCategories(
    2948             :       [](FreeListCategory* category) { category->Reset(); });
    2949     9246147 :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    2950     4267452 :     categories_[i] = nullptr;
    2951             :   }
    2952      711243 :   ResetStats();
    2953      711242 : }
    2954             : 
    2955    18204603 : size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
    2956             :   Page* page = Page::FromAddress(start);
    2957             :   page->DecreaseAllocatedBytes(size_in_bytes);
    2958             : 
    2959             :   // Blocks have to be a minimum size to hold free list items.
    2960    18204603 :   if (size_in_bytes < kMinBlockSize) {
    2961             :     page->add_wasted_memory(size_in_bytes);
    2962             :     wasted_bytes_ += size_in_bytes;
    2963      413574 :     return size_in_bytes;
    2964             :   }
    2965             : 
    2966             :   // Insert other blocks at the head of a free list of the appropriate
    2967             :   // magnitude.
    2968             :   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
    2969    17791029 :   page->free_list_category(type)->Free(start, size_in_bytes, mode);
    2970             :   DCHECK_EQ(page->AvailableInFreeList(),
    2971             :             page->AvailableInFreeListFromAllocatedBytes());
    2972    17779112 :   return 0;
    2973             : }
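
// SelectFreeListCategoryType maps a block size to one of kNumberOfCategories
// buckets; the real size boundaries live in spaces.h. A sketch with made-up
// thresholds, only to show the shape of the mapping:
enum IllustrativeCategory { kSmallCat, kMediumCat, kLargeCat, kHugeCat };

IllustrativeCategory SelectCategory(size_t size_in_bytes) {
  if (size_in_bytes < 256) return kSmallCat;    // Boundaries here are
  if (size_in_bytes < 2048) return kMediumCat;  // assumptions, not V8's
  if (size_in_bytes < 16384) return kLargeCat;  // actual values.
  return kHugeCat;  // Oversized blocks go to the linearly searched huge list.
}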
    2974             : 
    2975     3193164 : FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
    2976             :                                size_t* node_size) {
    2977             :   FreeListCategoryIterator it(this, type);
    2978             :   FreeSpace node;
    2979     3261007 :   while (it.HasNext()) {
    2980             :     FreeListCategory* current = it.Next();
    2981      662381 :     node = current->PickNodeFromList(minimum_size, node_size);
    2982      662412 :     if (!node.is_null()) {
    2983             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2984      594569 :       return node;
    2985             :     }
    2986             :     RemoveCategory(current);
    2987             :   }
    2988     2598626 :   return node;
    2989             : }
    2990             : 
    2991           0 : FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
    2992             :                                   size_t minimum_size, size_t* node_size) {
    2993      441454 :   if (categories_[type] == nullptr) return FreeSpace();
    2994       94762 :   FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
    2995             :   if (!node.is_null()) {
    2996             :     DCHECK(IsVeryLong() || Available() == SumFreeLists());
    2997             :   }
    2998       94762 :   return node;
    2999             : }
    3000             : 
    3001     1339827 : FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
    3002             :                                         size_t* node_size,
    3003             :                                         size_t minimum_size) {
    3004             :   FreeListCategoryIterator it(this, type);
    3005             :   FreeSpace node;
    3006     1395898 :   while (it.HasNext()) {
    3007             :     FreeListCategory* current = it.Next();
    3008      746375 :     node = current->SearchForNodeInList(minimum_size, node_size);
    3009      746376 :     if (!node.is_null()) {
    3010             :       DCHECK(IsVeryLong() || Available() == SumFreeLists());
    3011      690305 :       return node;
    3012             :     }
    3013       56071 :     if (current->is_empty()) {
    3014             :       RemoveCategory(current);
    3015             :     }
    3016             :   }
    3017      649523 :   return node;
    3018             : }
    3019             : 
    3020     1934274 : FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
    3021             :   DCHECK_GE(kMaxBlockSize, size_in_bytes);
    3022             :   FreeSpace node;
    3023             :   // First try the allocation fast path: try to allocate the minimum element
    3024             :   // size of a free list category. This operation is constant time.
    3025             :   FreeListCategoryType type =
    3026             :       SelectFastAllocationFreeListCategoryType(size_in_bytes);
    3027     5127284 :   for (int i = type; i < kHuge && node.is_null(); i++) {
    3028             :     node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
    3029     3193006 :                       node_size);
    3030             :   }
    3031             : 
    3032     1934278 :   if (node.is_null()) {
    3033             :     // Next search the huge list for free list nodes. This takes linear time in
    3034             :     // the number of huge elements.
    3035     1339805 :     node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
    3036             :   }
    3037             : 
    3038     1934302 :   if (node.is_null() && type != kHuge) {
    3039             :     // We didn't find anything in the huge list. Now search the best fitting
    3040             :     // free list for a node that has at least the requested size.
    3041             :     type = SelectFreeListCategoryType(size_in_bytes);
    3042             :     node = TryFindNodeIn(type, size_in_bytes, node_size);
    3043             :   }
    3044             : 
    3045     1934302 :   if (!node.is_null()) {
    3046     1305526 :     Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
    3047             :   }
    3048             : 
    3049             :   DCHECK(IsVeryLong() || Available() == SumFreeLists());
    3050     1934302 :   return node;
    3051             : }
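
// Allocate() above is a three-phase strategy: constant-time pops from
// progressively larger categories, then a linear search of the huge list,
// then one best-fit retry in the exact category of the request. A schematic
// version using the illustrative helpers sketched earlier (V8's two category
// selectors are collapsed into the single SelectCategory for brevity):
FreeNode* AllocateFromLists(FreeNode* tops[kHugeCat + 1], size_t size_in_bytes,
                            size_t* node_size) {
  FreeNode* node = nullptr;
  // Phase 1: O(1) pops, walking up the size classes.
  for (int i = SelectCategory(size_in_bytes); i < kHugeCat && node == nullptr;
       i++) {
    node = PickNode(&tops[i], size_in_bytes, node_size);
  }
  // Phase 2: O(n) first-fit search of the huge list.
  if (node == nullptr) {
    node = SearchList(&tops[kHugeCat], size_in_bytes, node_size);
  }
  // Phase 3: one more pop from the request's own category.
  if (node == nullptr && SelectCategory(size_in_bytes) != kHugeCat) {
    node = PickNode(&tops[SelectCategory(size_in_bytes)], size_in_bytes,
                    node_size);
  }
  return node;
}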
    3052             : 
    3053      220496 : size_t FreeList::EvictFreeListItems(Page* page) {
    3054      220496 :   size_t sum = 0;
    3055     2645940 :   page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
    3056             :     DCHECK_EQ(this, category->owner());
    3057     1322970 :     sum += category->available();
    3058             :     RemoveCategory(category);
    3059             :     category->Reset();
    3060     1322970 :   });
    3061      220496 :   return sum;
    3062             : }
    3063             : 
    3064           0 : bool FreeList::ContainsPageFreeListItems(Page* page) {
    3065           0 :   bool contained = false;
    3066             :   page->ForAllFreeListCategories(
    3067             :       [this, &contained](FreeListCategory* category) {
    3068           0 :         if (category->owner() == this && category->is_linked()) {
    3069           0 :           contained = true;
    3070             :         }
    3071             :       });
    3072           0 :   return contained;
    3073             : }
    3074             : 
    3075           0 : void FreeList::RepairLists(Heap* heap) {
    3076             :   ForAllFreeListCategories(
    3077      124772 :       [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
    3078           0 : }
    3079             : 
    3080           0 : bool FreeList::AddCategory(FreeListCategory* category) {
    3081     7622286 :   FreeListCategoryType type = category->type_;
    3082             :   DCHECK_LT(type, kNumberOfCategories);
    3083     7622286 :   FreeListCategory* top = categories_[type];
    3084             : 
    3085     7622286 :   if (category->is_empty()) return false;
    3086     2535539 :   if (top == category) return false;
    3087             : 
    3088             :   // Common doubly-linked list insertion.
    3089     1899708 :   if (top != nullptr) {
    3090             :     top->set_prev(category);
    3091             :   }
    3092             :   category->set_next(top);
    3093     1899708 :   categories_[type] = category;
    3094           0 :   return true;
    3095             : }
    3096             : 
    3097           6 : void FreeList::RemoveCategory(FreeListCategory* category) {
    3098     2470492 :   FreeListCategoryType type = category->type_;
    3099             :   DCHECK_LT(type, kNumberOfCategories);
    3100     2470492 :   FreeListCategory* top = categories_[type];
    3101             : 
    3102             :   // Common doubly-linked list removal.
    3103     2470492 :   if (top == category) {
    3104      489173 :     categories_[type] = category->next();
    3105             :   }
    3106     2470492 :   if (category->prev() != nullptr) {
    3107             :     category->prev()->set_next(category->next());
    3108             :   }
    3109     2470492 :   if (category->next() != nullptr) {
    3110             :     category->next()->set_prev(category->prev());
    3111             :   }
    3112             :   category->set_next(nullptr);
    3113             :   category->set_prev(nullptr);
    3114           6 : }
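
// Per size class, FreeList keeps an intrusive doubly-linked list of the
// non-empty categories; AddCategory pushes at the head and RemoveCategory
// performs the standard four-pointer unlink. The unlink in isolation, on an
// illustrative node type:
struct CatNode {
  CatNode* prev = nullptr;
  CatNode* next = nullptr;
};

void Unlink(CatNode** head, CatNode* n) {
  if (*head == n) *head = n->next;                  // Fix the list head.
  if (n->prev != nullptr) n->prev->next = n->next;  // Bypass forwards.
  if (n->next != nullptr) n->next->prev = n->prev;  // Bypass backwards.
  n->prev = n->next = nullptr;                      // Mark as unlinked.
}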
    3115             : 
    3116           0 : void FreeList::PrintCategories(FreeListCategoryType type) {
    3117             :   FreeListCategoryIterator it(this, type);
    3118             :   PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
    3119           0 :          static_cast<void*>(categories_[type]), type);
    3120           0 :   while (it.HasNext()) {
    3121             :     FreeListCategory* current = it.Next();
    3122           0 :     PrintF("%p -> ", static_cast<void*>(current));
    3123             :   }
    3124           0 :   PrintF("null\n");
    3125           0 : }
    3126             : 
    3127             : 
    3128             : #ifdef DEBUG
    3129             : size_t FreeListCategory::SumFreeList() {
    3130             :   size_t sum = 0;
    3131             :   FreeSpace cur = top();
    3132             :   while (!cur.is_null()) {
    3133             :     // We can't use "cur->map()" here because both cur's map and the
    3134             :     // root can be null during bootstrapping.
    3135             :     DCHECK(cur->map_slot().contains_value(
    3136             :         page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr()));
    3137             :     sum += cur->relaxed_read_size();
    3138             :     cur = cur->next();
    3139             :   }
    3140             :   return sum;
    3141             : }
    3142             : 
    3143             : int FreeListCategory::FreeListLength() {
    3144             :   int length = 0;
    3145             :   FreeSpace cur = top();
    3146             :   while (!cur.is_null()) {
    3147             :     length++;
    3148             :     cur = cur->next();
    3149             :     if (length == kVeryLongFreeList) return length;
    3150             :   }
    3151             :   return length;
    3152             : }
    3153             : 
    3154             : bool FreeList::IsVeryLong() {
    3155             :   int len = 0;
    3156             :   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    3157             :     FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
    3158             :     while (it.HasNext()) {
    3159             :       len += it.Next()->FreeListLength();
    3160             :       if (len >= FreeListCategory::kVeryLongFreeList) return true;
    3161             :     }
    3162             :   }
    3163             :   return false;
    3164             : }
    3165             : 
    3166             : 
    3167             : // This can take a very long time because it is linear in the number of entries
    3168             : // on the free list, so it should not be called if FreeListLength returns
    3169             : // kVeryLongFreeList.
    3170             : size_t FreeList::SumFreeLists() {
    3171             :   size_t sum = 0;
    3172             :   ForAllFreeListCategories(
    3173             :       [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
    3174             :   return sum;
    3175             : }
    3176             : #endif
    3177             : 
    3178             : 
    3179             : // -----------------------------------------------------------------------------
    3180             : // OldSpace implementation
    3181             : 
    3182      206538 : void PagedSpace::PrepareForMarkCompact() {
    3183             :   // We don't have a linear allocation area while sweeping.  It will be restored
    3184             :   // on the first allocation after the sweep.
    3185      206538 :   FreeLinearAllocationArea();
    3186             : 
    3187             :   // Clear the free list before a full GC---it will be rebuilt afterward.
    3188      206538 :   free_list_.Reset();
    3189      206538 : }
    3190             : 
    3191    10641955 : size_t PagedSpace::SizeOfObjects() {
    3192    10641955 :   CHECK_GE(limit(), top());
    3193             :   DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
    3194    21283912 :   return Size() - (limit() - top());
    3195             : }
    3196             : 
    3197         443 : bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
    3198             :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3199         443 :   if (collector->sweeping_in_progress()) {
    3200             :     // Wait for the sweeper threads here and complete the sweeping phase.
    3201          10 :     collector->EnsureSweepingCompleted();
    3202             : 
    3203             :     // After waiting for the sweeper threads, there may be new free-list
    3204             :     // entries.
    3205          10 :     return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
    3206             :   }
    3207             :   return false;
    3208             : }
    3209             : 
    3210       11844 : bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
    3211             :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3212       11844 :   if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
    3213           5 :     collector->sweeper()->ParallelSweepSpace(identity(), 0);
    3214           5 :     RefillFreeList();
    3215           5 :     return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
    3216             :   }
    3217             :   return false;
    3218             : }
    3219             : 
    3220     1082035 : bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
    3221             :   VMState<GC> state(heap()->isolate());
    3222             :   RuntimeCallTimerScope runtime_timer(
    3223     1082035 :       heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
    3224     2164073 :   return RawSlowRefillLinearAllocationArea(size_in_bytes);
    3225             : }
    3226             : 
    3227      235686 : bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
    3228      235686 :   return RawSlowRefillLinearAllocationArea(size_in_bytes);
    3229             : }
    3230             : 
    3231     1317722 : bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
    3232             :   // Allocation in this space has failed.
    3233             :   DCHECK_GE(size_in_bytes, 0);
    3234             :   const int kMaxPagesToSweep = 1;
    3235             : 
    3236     1317722 :   if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
    3237             : 
    3238             :   MarkCompactCollector* collector = heap()->mark_compact_collector();
    3239             :   // Sweeping is still in progress.
    3240      554524 :   if (collector->sweeping_in_progress()) {
    3241      125056 :     if (FLAG_concurrent_sweeping && !is_local() &&
    3242       35667 :         !collector->sweeper()->AreSweeperTasksRunning()) {
    3243       21750 :       collector->EnsureSweepingCompleted();
    3244             :     }
    3245             : 
    3246             :     // First try to refill the free list; concurrent sweeper threads
    3247             :     // may have freed some objects in the meantime.
    3248       89380 :     RefillFreeList();
    3249             : 
    3250             :     // Retry the free list allocation.
    3251       89417 :     if (RefillLinearAllocationAreaFromFreeList(
    3252             :             static_cast<size_t>(size_in_bytes)))
    3253             :       return true;
    3254             : 
    3255             :     // If sweeping is still in progress, try to sweep pages.
    3256             :     int max_freed = collector->sweeper()->ParallelSweepSpace(
    3257       59798 :         identity(), size_in_bytes, kMaxPagesToSweep);
    3258       59807 :     RefillFreeList();
    3259       59807 :     if (max_freed >= size_in_bytes) {
    3260       35357 :       if (RefillLinearAllocationAreaFromFreeList(
    3261             :               static_cast<size_t>(size_in_bytes)))
    3262             :         return true;
    3263             :     }
    3264             :   }
    3265             : 
    3266      490380 :   if (is_local()) {
    3267             :     // The main thread may have acquired all swept pages. Try to steal from
    3268             :     // it. This can only happen during young generation evacuation.
    3269       63562 :     PagedSpace* main_space = heap()->paged_space(identity());
    3270       63562 :     Page* page = main_space->RemovePageSafe(size_in_bytes);
    3271       63592 :     if (page != nullptr) {
    3272       25484 :       AddPage(page);
    3273       25484 :       if (RefillLinearAllocationAreaFromFreeList(
    3274             :               static_cast<size_t>(size_in_bytes)))
    3275             :         return true;
    3276             :     }
    3277             :   }
    3278             : 
    3279      478625 :   if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
    3280             :     DCHECK((CountTotalPages() > 1) ||
    3281             :            (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    3282             :     return RefillLinearAllocationAreaFromFreeList(
    3283      466340 :         static_cast<size_t>(size_in_bytes));
    3284             :   }
    3285             : 
    3286             :   // If sweeper threads are active, wait for them at this point and steal
    3287             :   // elements from their free lists. Allocation may still fail here, which
    3288             :   // would indicate that there is not enough memory for the given allocation.
    3289       12287 :   return SweepAndRetryAllocation(size_in_bytes);
    3290             : }
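
// The slow path above is a ladder of progressively more expensive fallbacks.
// A schematic outline with hypothetical predicates (declared only so the
// sketch is self-contained; none of these names exist in V8):
bool TryFreeList(size_t size);                // Refill from the free list.
bool TrySweepAndRefill(size_t size);          // Help or await the sweeper.
bool TryStealPageFromMainSpace(size_t size);  // Local (compaction) spaces.
bool TryExpandSpace(size_t size);             // Map a fresh page.
bool SweepAndRetry(size_t size);              // Last resort: finish sweeping.

bool SlowAllocateSketch(size_t size) {
  if (TryFreeList(size)) return true;
  if (TrySweepAndRefill(size)) return true;
  if (TryStealPageFromMainSpace(size)) return true;
  if (TryExpandSpace(size)) return true;
  return SweepAndRetry(size);
}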
    3291             : 
    3292             : // -----------------------------------------------------------------------------
    3293             : // MapSpace implementation
    3294             : 
    3295             : #ifdef VERIFY_HEAP
    3296             : void MapSpace::VerifyObject(HeapObject object) { CHECK(object->IsMap()); }
    3297             : #endif
    3298             : 
    3299       62441 : ReadOnlySpace::ReadOnlySpace(Heap* heap)
    3300             :     : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
    3301      124883 :       is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
    3302       62442 : }
    3303             : 
    3304       62890 : void ReadOnlyPage::MakeHeaderRelocatable() {
    3305       62890 :   if (mutex_ != nullptr) {
    3306             :     // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
    3307       62442 :     delete mutex_;
    3308       62442 :     mutex_ = nullptr;
    3309       62442 :     local_tracker_ = nullptr;
    3310       62442 :     reservation_.Reset();
    3311             :   }
    3312       62890 : }
    3313             : 
    3314           0 : void ReadOnlySpace::Forget() {
    3315           0 :   for (Page* p : *this) {
    3316           0 :     heap()->memory_allocator()->PreFreeMemory(p);
    3317             :   }
    3318           0 : }
    3319             : 
    3320      125764 : void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
    3321             :   MemoryAllocator* memory_allocator = heap()->memory_allocator();
    3322      251529 :   for (Page* p : *this) {
    3323             :     ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
    3324      125764 :     if (access == PageAllocator::kRead) {
    3325       62890 :       page->MakeHeaderRelocatable();
    3326             :     }
    3327             : 
    3328             :     // Read-only pages don't have a valid reservation object, so we get the
    3329             :     // proper page allocator manually.
    3330             :     v8::PageAllocator* page_allocator =
    3331             :         memory_allocator->page_allocator(page->executable());
    3332      125765 :     CHECK(
    3333             :         SetPermissions(page_allocator, page->address(), page->size(), access));
    3334             :   }
    3335      125765 : }
    3336             : 
    3337             : // After we have booted, we have created a map which represents free space
    3338             : // on the heap.  If there was already a free list then the elements on it
    3339             : // were created with the wrong FreeSpaceMap (normally nullptr), so we need to
    3340             : // fix them.
    3341       62386 : void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
    3342       62386 :   free_list_.RepairLists(heap());
    3343             :   // Each page may have a small free space that is not tracked by a free list.
    3344             :   // Those free spaces still contain null as their map pointer.
    3345             :   // Overwrite them with new fillers.
    3346      124772 :   for (Page* page : *this) {
    3347       62386 :     int size = static_cast<int>(page->wasted_memory());
    3348       62386 :     if (size == 0) {
    3349             :       // If there is no wasted memory then all free space is in the free list.
    3350             :       continue;
    3351             :     }
    3352           0 :     Address start = page->HighWaterMark();
    3353             :     Address end = page->area_end();
    3354           0 :     if (start < end - size) {
    3355             :       // A region at the high watermark is already in the free list.
    3356           0 :       HeapObject filler = HeapObject::FromAddress(start);
    3357           0 :       CHECK(filler->IsFiller());
    3358           0 :       start += filler->Size();
    3359             :     }
    3360           0 :     CHECK_EQ(size, static_cast<int>(end - start));
    3361           0 :     heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
    3362             :   }
    3363       62386 : }
    3364             : 
    3365         625 : void ReadOnlySpace::ClearStringPaddingIfNeeded() {
    3366         802 :   if (is_string_padding_cleared_) return;
    3367             : 
    3368         448 :   WritableScope writable_scope(this);
    3369         896 :   for (Page* page : *this) {
    3370             :     HeapObjectIterator iterator(page);
    3371      511224 :     for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
    3372      510776 :       if (o->IsSeqOneByteString()) {
    3373      229824 :         SeqOneByteString::cast(o)->clear_padding();
    3374      280952 :       } else if (o->IsSeqTwoByteString()) {
    3375           0 :         SeqTwoByteString::cast(o)->clear_padding();
    3376             :       }
    3377             :     }
    3378             :   }
    3379         448 :   is_string_padding_cleared_ = true;
    3380             : }
    3381             : 
    3382       62442 : void ReadOnlySpace::MarkAsReadOnly() {
    3383             :   DCHECK(!is_marked_read_only_);
    3384       62890 :   FreeLinearAllocationArea();
    3385       62890 :   is_marked_read_only_ = true;
    3386       62890 :   SetPermissionsForPages(PageAllocator::kRead);
    3387       62442 : }
    3388             : 
    3389       62426 : void ReadOnlySpace::MarkAsReadWrite() {
    3390             :   DCHECK(is_marked_read_only_);
    3391       62874 :   SetPermissionsForPages(PageAllocator::kReadWrite);
    3392       62875 :   is_marked_read_only_ = false;
    3393       62427 : }
    3394             : 
    3395       49432 : Address LargePage::GetAddressToShrink(Address object_address,
    3396             :                                       size_t object_size) {
    3397       49432 :   if (executable() == EXECUTABLE) {
    3398             :     return 0;
    3399             :   }
    3400       29518 :   size_t used_size = ::RoundUp((object_address - address()) + object_size,
    3401             :                                MemoryAllocator::GetCommitPageSize());
    3402       14759 :   if (used_size < CommittedPhysicalMemory()) {
    3403          84 :     return address() + used_size;
    3404             :   }
    3405             :   return 0;
    3406             : }
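
// GetAddressToShrink rounds the used span up to the commit page size and, if
// that leaves a committed tail, returns where the tail starts. A worked
// example assuming a 4 KiB commit page (the real value comes from
// MemoryAllocator::GetCommitPageSize()):
//
//   object ends 5000 bytes into the page
//   used_size = RoundUp(5000, 4096) = 8192
//   => everything from address() + 8192 onward can be released.
//
// RoundUp itself, for a power-of-two alignment:
size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}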
    3407             : 
    3408          84 : void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
    3409          84 :   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
    3410          84 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    3411             :   RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
    3412          84 :                                          SlotSet::FREE_EMPTY_BUCKETS);
    3413          84 :   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
    3414          84 :   RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
    3415          84 : }
    3416             : 
    3417             : // -----------------------------------------------------------------------------
    3418             : // LargeObjectIterator
    3419             : 
    3420       23625 : LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
    3421       23625 :   current_ = space->first_page();
    3422           0 : }
    3423             : 
    3424       27107 : HeapObject LargeObjectIterator::Next() {
    3425      214231 :   if (current_ == nullptr) return HeapObject();
    3426             : 
    3427             :   HeapObject object = current_->GetObject();
    3428        3482 :   current_ = current_->next_page();
    3429        3482 :   return object;
    3430             : }
    3431             : 
    3432             : // -----------------------------------------------------------------------------
    3433             : // LargeObjectSpace
    3434             : 
    3435       62442 : LargeObjectSpace::LargeObjectSpace(Heap* heap)
    3436       62442 :     : LargeObjectSpace(heap, LO_SPACE) {}
    3437             : 
    3438           0 : LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    3439      187326 :     : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}
    3440             : 
    3441      187281 : void LargeObjectSpace::TearDown() {
    3442      309495 :   while (!memory_chunk_list_.Empty()) {
    3443             :     LargePage* page = first_page();
    3444       61107 :     LOG(heap()->isolate(),
    3445             :         DeleteEvent("LargeObjectChunk",
    3446             :                     reinterpret_cast<void*>(page->address())));
    3447             :     memory_chunk_list_.Remove(page);
    3448       61107 :     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
    3449             :   }
    3450      187281 : }
    3451             : 
    3452       17200 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
    3453       17200 :   return AllocateRaw(object_size, NOT_EXECUTABLE);
    3454             : }
    3455             : 
    3456       57387 : AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
    3457             :                                                Executability executable) {
    3458             :   // Check if we want to force a GC before growing the old space further.
    3459             :   // If so, fail the allocation.
    3460      114759 :   if (!heap()->CanExpandOldGeneration(object_size) ||
    3461       57372 :       !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    3462             :     return AllocationResult::Retry(identity());
    3463             :   }
    3464             : 
    3465       57350 :   LargePage* page = AllocateLargePage(object_size, executable);
    3466       57350 :   if (page == nullptr) return AllocationResult::Retry(identity());
    3467             :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3468             :   HeapObject object = page->GetObject();
    3469             :   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
    3470             :       heap()->GCFlagsForIncrementalMarking(),
    3471       57350 :       kGCCallbackScheduleIdleGarbageCollection);
    3472       57350 :   if (heap()->incremental_marking()->black_allocation()) {
    3473             :     heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
    3474             :   }
    3475             :   DCHECK_IMPLIES(
    3476             :       heap()->incremental_marking()->black_allocation(),
    3477             :       heap()->incremental_marking()->marking_state()->IsBlack(object));
    3478             :   page->InitializationMemoryFence();
    3479       57350 :   heap()->NotifyOldGenerationExpansion();
    3480       57350 :   AllocationStep(object_size, object->address(), object_size);
    3481       57350 :   return object;
    3482             : }
    3483             : 
    3484       73011 : LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
    3485             :                                                Executability executable) {
    3486       73011 :   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
    3487       73011 :       object_size, this, executable);
    3488       73011 :   if (page == nullptr) return nullptr;
    3489             :   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
    3490             : 
    3491       73011 :   AddPage(page, object_size);
    3492             : 
    3493             :   HeapObject object = page->GetObject();
    3494             : 
    3495             :   heap()->CreateFillerObjectAt(object->address(), object_size,
    3496       73011 :                                ClearRecordedSlots::kNo);
    3497       73011 :   return page;
    3498             : }
    3499             : 
    3500             : 
    3501         753 : size_t LargeObjectSpace::CommittedPhysicalMemory() {
    3502             :   // On a platform that provides lazy committing of memory, we over-account
    3503             :   // the actually committed memory. There is no easy way right now to support
    3504             :   // precise accounting of committed memory in large object space.
    3505         753 :   return CommittedMemory();
    3506             : }
    3507             : 
    3508      537690 : LargePage* CodeLargeObjectSpace::FindPage(Address a) {
    3509      537690 :   const Address key = MemoryChunk::FromAddress(a)->address();
    3510             :   auto it = chunk_map_.find(key);
    3511      537690 :   if (it != chunk_map_.end()) {
    3512        5053 :     LargePage* page = it->second;
    3513        5053 :     CHECK(page->Contains(a));
    3514             :     return page;
    3515             :   }
    3516             :   return nullptr;
    3517             : }
    3518             : 
    3519      137692 : void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
    3520             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    3521             :       heap()->incremental_marking()->non_atomic_marking_state();
    3522             :   LargeObjectIterator it(this);
    3523      187124 :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    3524       49432 :     if (marking_state->IsBlackOrGrey(obj)) {
    3525             :       Marking::MarkWhite(marking_state->MarkBitFrom(obj));
    3526             :       MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
    3527       49432 :       RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
    3528             :       chunk->ResetProgressBar();
    3529             :       marking_state->SetLiveBytes(chunk, 0);
    3530             :     }
    3531             :     DCHECK(marking_state->IsWhite(obj));
    3532             :   }
    3533      137692 : }
    3534             : 
    3535       40180 : void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
    3536      161112 :   for (Address current = reinterpret_cast<Address>(page);
    3537       80556 :        current < reinterpret_cast<Address>(page) + page->size();
    3538             :        current += MemoryChunk::kPageSize) {
    3539       40376 :     chunk_map_[current] = page;
    3540             :   }
    3541       40180 : }
    3542             : 
    3543          24 : void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
    3544         432 :   for (Address current = page->address();
    3545         204 :        current < reinterpret_cast<Address>(page) + page->size();
    3546             :        current += MemoryChunk::kPageSize) {
    3547             :     chunk_map_.erase(current);
    3548             :   }
    3549          24 : }
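
// FindPage above is O(1) because InsertChunkMapEntries registers every
// page-size-aligned address a large page spans, so any interior address can
// be masked down to an existing key. A sketch with std::unordered_map and an
// assumed 512 KiB grid (illustrative, not V8's constant); |start| is assumed
// grid-aligned, as chunk addresses are:
#include <cstdint>
#include <unordered_map>

constexpr uintptr_t kGridSize = 512 * 1024;

struct PageStub {};  // Stand-in for LargePage.
std::unordered_map<uintptr_t, PageStub*> grid_map;

void InsertEntries(PageStub* page, uintptr_t start, size_t size) {
  for (uintptr_t a = start; a < start + size; a += kGridSize) {
    grid_map[a] = page;  // One entry per grid slot the page covers.
  }
}

PageStub* FindByAddress(uintptr_t addr) {
  auto it = grid_map.find(addr & ~(kGridSize - 1));  // Mask to the grid key.
  return it == grid_map.end() ? nullptr : it->second;
}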
    3550             : 
    3551        5074 : void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
    3552             :   DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
    3553             :   DCHECK(page->IsLargePage());
    3554             :   DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
    3555             :   DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
    3556        5074 :   size_t object_size = static_cast<size_t>(page->GetObject()->Size());
    3557        5074 :   static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
    3558        5074 :   AddPage(page, object_size);
    3559             :   page->ClearFlag(MemoryChunk::FROM_PAGE);
    3560             :   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3561        5074 :   page->set_owner(this);
    3562        5074 : }
    3563             : 
    3564       78085 : void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
    3565       78085 :   size_ += static_cast<int>(page->size());
    3566             :   AccountCommitted(page->size());
    3567       78085 :   objects_size_ += object_size;
    3568       78085 :   page_count_++;
    3569             :   memory_chunk_list_.PushBack(page);
    3570       78085 : }
    3571             : 
    3572       16978 : void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
    3573       16978 :   size_ -= static_cast<int>(page->size());
    3574             :   AccountUncommitted(page->size());
    3575       16978 :   objects_size_ -= object_size;
    3576       16978 :   page_count_--;
    3577             :   memory_chunk_list_.Remove(page);
    3578       16978 : }
    3579             : 
    3580      206538 : void LargeObjectSpace::FreeUnmarkedObjects() {
    3581             :   LargePage* current = first_page();
    3582             :   IncrementalMarking::NonAtomicMarkingState* marking_state =
    3583             :       heap()->incremental_marking()->non_atomic_marking_state();
    3584             :   // Right-trimming does not update the objects_size_ counter, so we update
    3585             :   // it lazily after every GC.
    3586             :   size_t surviving_object_size = 0;
    3587      311300 :   while (current) {
    3588             :     LargePage* next_current = current->next_page();
    3589       52381 :     HeapObject object = current->GetObject();
    3590             :     DCHECK(!marking_state->IsGrey(object));
    3591       52381 :     size_t size = static_cast<size_t>(object->Size());
    3592       52381 :     if (marking_state->IsBlack(object)) {
    3593             :       Address free_start;
    3594       49432 :       surviving_object_size += size;
    3595       49432 :       if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
    3596             :           0) {
    3597             :         DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
    3598          84 :         current->ClearOutOfLiveRangeSlots(free_start);
    3599             :         const size_t bytes_to_free =
    3600         168 :             current->size() - (free_start - current->address());
    3601          84 :         heap()->memory_allocator()->PartialFreeMemory(
    3602             :             current, free_start, bytes_to_free,
    3603         168 :             current->area_start() + object->Size());
    3604          84 :         size_ -= bytes_to_free;
    3605             :         AccountUncommitted(bytes_to_free);
    3606             :       }
    3607             :     } else {
    3608        2949 :       RemovePage(current, size);
    3609             :       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
    3610        2949 :           current);
    3611             :     }
    3612             :     current = next_current;
    3613             :   }
    3614      206538 :   objects_size_ = surviving_object_size;
    3615      206538 : }
    3616             : 
    3617          90 : bool LargeObjectSpace::Contains(HeapObject object) {
    3618             :   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    3619             : 
    3620          90 :   bool owned = (chunk->owner() == this);
    3621             : 
    3622             :   SLOW_DCHECK(!owned || ContainsSlow(object->address()));
    3623             : 
    3624          90 :   return owned;
    3625             : }
    3626             : 
    3627           0 : bool LargeObjectSpace::ContainsSlow(Address addr) {
    3628           0 :   for (LargePage* page : *this) {
    3629           0 :     if (page->Contains(addr)) return true;
    3630             :   }
    3631             :   return false;
    3632             : }
    3633             : 
    3634       23625 : std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
    3635       23625 :   return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
    3636             : }
    3637             : 
    3638             : #ifdef VERIFY_HEAP
    3639             : // We do not assume that the large object iterator works, because it depends
    3640             : // on the invariants we are checking during verification.
    3641             : void LargeObjectSpace::Verify(Isolate* isolate) {
    3642             :   size_t external_backing_store_bytes[kNumTypes];
    3643             : 
    3644             :   for (int i = 0; i < kNumTypes; i++) {
    3645             :     external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    3646             :   }
    3647             : 
    3648             :   for (LargePage* chunk = first_page(); chunk != nullptr;
    3649             :        chunk = chunk->next_page()) {
    3650             :     // Each chunk contains an object that starts at the large object page's
    3651             :     // object area start.
    3652             :     HeapObject object = chunk->GetObject();
    3653             :     Page* page = Page::FromHeapObject(object);
    3654             :     CHECK(object->address() == page->area_start());
    3655             : 
    3656             :     // The first word should be a map, and we expect all map pointers to be
    3657             :     // in map space or read-only space.
    3658             :     Map map = object->map();
    3659             :     CHECK(map->IsMap());
    3660             :     CHECK(heap()->map_space()->Contains(map) ||
    3661             :           heap()->read_only_space()->Contains(map));
    3662             : 
    3663             :     // We have only the following types in the large object space:
    3664             :     if (!(object->IsAbstractCode() || object->IsSeqString() ||
    3665             :           object->IsExternalString() || object->IsThinString() ||
    3666             :           object->IsFixedArray() || object->IsFixedDoubleArray() ||
    3667             :           object->IsWeakFixedArray() || object->IsWeakArrayList() ||
    3668             :           object->IsPropertyArray() || object->IsByteArray() ||
    3669             :           object->IsFeedbackVector() || object->IsBigInt() ||
    3670             :           object->IsFreeSpace() || object->IsFeedbackMetadata() ||
    3671             :           object->IsContext() ||
    3672             :           object->IsUncompiledDataWithoutPreparseData() ||
    3673             :           object->IsPreparseData()) &&
    3674             :         !FLAG_young_generation_large_objects) {
    3675             :       FATAL("Found invalid Object (instance_type=%i) in large object space.",
    3676             :             object->map()->instance_type());
    3677             :     }
    3678             : 
    3679             :     // The object itself should look OK.
    3680             :     object->ObjectVerify(isolate);
    3681             : 
    3682             :     if (!FLAG_verify_heap_skip_remembered_set) {
    3683             :       heap()->VerifyRememberedSetFor(object);
    3684             :     }
    3685             : 
    3686             :     // Byte arrays and strings don't have interior pointers.
    3687             :     if (object->IsAbstractCode()) {
    3688             :       VerifyPointersVisitor code_visitor(heap());
    3689             :       object->IterateBody(map, object->Size(), &code_visitor);
    3690             :     } else if (object->IsFixedArray()) {
    3691             :       FixedArray array = FixedArray::cast(object);
    3692             :       for (int j = 0; j < array->length(); j++) {
    3693             :         Object element = array->get(j);
    3694             :         if (element->IsHeapObject()) {
    3695             :           HeapObject element_object = HeapObject::cast(element);
    3696             :           CHECK(heap()->Contains(element_object));
    3697             :           CHECK(element_object->map()->IsMap());
    3698             :         }
    3699             :       }
    3700             :     } else if (object->IsPropertyArray()) {
    3701             :       PropertyArray array = PropertyArray::cast(object);
    3702             :       for (int j = 0; j < array->length(); j++) {
    3703             :         Object property = array->get(j);
    3704             :         if (property->IsHeapObject()) {
    3705             :           HeapObject property_object = HeapObject::cast(property);
    3706             :           CHECK(heap()->Contains(property_object));
    3707             :           CHECK(property_object->map()->IsMap());
    3708             :         }
    3709             :       }
    3710             :     }
    3711             :     for (int i = 0; i < kNumTypes; i++) {
    3712             :       ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    3713             :       external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    3714             :     }
    3715             :   }
    3716             :   for (int i = 0; i < kNumTypes; i++) {
    3717             :     ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    3718             :     CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
    3719             :   }
    3720             : }
    3721             : #endif
    3722             : 
    3723             : #ifdef DEBUG
    3724             : void LargeObjectSpace::Print() {
    3725             :   StdoutStream os;
    3726             :   LargeObjectIterator it(this);
    3727             :   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    3728             :     obj->Print(os);
    3729             :   }
    3730             : }
    3731             : 
    3732             : void Page::Print() {
    3733             :   // Make a best effort to print the objects in the page.
    3734             :   PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
    3735             :          this->owner()->name());
    3736             :   printf(" --------------------------------------\n");
    3737             :   HeapObjectIterator objects(this);
    3738             :   unsigned mark_size = 0;
    3739             :   for (HeapObject object = objects.Next(); !object.is_null();
    3740             :        object = objects.Next()) {
    3741             :     bool is_marked =
    3742             :         heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
    3743             :     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    3744             :     if (is_marked) {
    3745             :       mark_size += object->Size();
    3746             :     }
    3747             :     object->ShortPrint();
    3748             :     PrintF("\n");
    3749             :   }
    3750             :   printf(" --------------------------------------\n");
    3751             :   printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
    3752             :          heap()->incremental_marking()->marking_state()->live_bytes(this));
    3753             : }
    3754             : 
    3755             : #endif  // DEBUG
    3756             : 
    3757       62442 : NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
    3758             :     : LargeObjectSpace(heap, NEW_LO_SPACE),
    3759             :       pending_object_(0),
    3760      124884 :       capacity_(capacity) {}
    3761             : 
    3762       19231 : AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
    3763             :   // Do not allocate more objects if promoting the existing object would exceed
    3764             :   // the old generation capacity.
    3765       19231 :   if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
    3766             :     return AllocationResult::Retry(identity());
    3767             :   }
    3768             : 
    3769             :   // Allocation of the first object must succeed independently of the capacity.
    3770       19229 :   if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
    3771             :     return AllocationResult::Retry(identity());
    3772             :   }
    3773             : 
    3774       15661 :   LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
    3775       15661 :   if (page == nullptr) return AllocationResult::Retry(identity());
    3776             : 
    3777             :   // The size of the first object may exceed the capacity.
    3778       31322 :   capacity_ = Max(capacity_, SizeOfObjects());
    3779             : 
    3780             :   HeapObject result = page->GetObject();
    3781             :   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    3782             :   page->SetFlag(MemoryChunk::TO_PAGE);
    3783             :   pending_object_.store(result->address(), std::memory_order_relaxed);
    3784             : #ifdef ENABLE_MINOR_MC
    3785       15661 :   if (FLAG_minor_mc) {
    3786             :     page->AllocateYoungGenerationBitmap();
    3787             :     heap()
    3788             :         ->minor_mark_compact_collector()
    3789             :         ->non_atomic_marking_state()
    3790           0 :         ->ClearLiveness(page);
    3791             :   }
    3792             : #endif  // ENABLE_MINOR_MC
    3793             :   page->InitializationMemoryFence();
    3794             :   DCHECK(page->IsLargePage());
    3795             :   DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
    3796       15661 :   AllocationStep(object_size, result->address(), object_size);
    3797       15661 :   return result;
    3798             : }
    3799             : 
    3800       13607 : size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
    3801             : 
    3802       94944 : void NewLargeObjectSpace::Flip() {
    3803      124320 :   for (LargePage* chunk = first_page(); chunk != nullptr;
    3804             :        chunk = chunk->next_page()) {
    3805             :     chunk->SetFlag(MemoryChunk::FROM_PAGE);
    3806             :     chunk->ClearFlag(MemoryChunk::TO_PAGE);
    3807             :   }
    3808       94944 : }
    3809             : 
    3810       26098 : void NewLargeObjectSpace::FreeDeadObjects(
    3811             :     const std::function<bool(HeapObject)>& is_dead) {
    3812             :   bool is_marking = heap()->incremental_marking()->IsMarking();
    3813             :   size_t surviving_object_size = 0;
    3814             :   bool freed_pages = false;
    3815       44008 :   for (auto it = begin(); it != end();) {
    3816             :     LargePage* page = *it;
    3817             :     it++;
    3818        8955 :     HeapObject object = page->GetObject();
    3819        8955 :     size_t size = static_cast<size_t>(object->Size());
    3820        8955 :     if (is_dead(object)) {
    3821             :       freed_pages = true;
    3822        8955 :       RemovePage(page, size);
    3823        8955 :       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
    3824        8955 :       if (FLAG_concurrent_marking && is_marking) {
    3825        1224 :         heap()->concurrent_marking()->ClearMemoryChunkData(page);
    3826             :       }
    3827             :     } else {
    3828           0 :       surviving_object_size += size;
    3829             :     }
    3830             :   }
    3831             :   // Right-trimming does not update the objects_size_ counter, so we update
    3832             :   // it lazily after every GC.
    3833       26098 :   objects_size_ = surviving_object_size;
    3834       26098 :   if (freed_pages) {
    3835        2640 :     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
    3836             :   }
    3837       26098 : }
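
// FreeDeadObjects increments the iterator before removing the page: removal
// unlinks the chunk the iterator points at, so the classic advance-then-erase
// pattern is required. The same pattern on a std::list:
#include <list>

void EraseEvens(std::list<int>& values) {
  for (auto it = values.begin(); it != values.end();) {
    auto current = it++;      // Advance first...
    if (*current % 2 == 0) {
      values.erase(current);  // ...then erasing behind us is safe.
    }
  }
}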
    3838             : 
    3839      112142 : void NewLargeObjectSpace::SetCapacity(size_t capacity) {
    3840      224284 :   capacity_ = Max(capacity, SizeOfObjects());
    3841      112142 : }
    3842             : 
    3843       62442 : CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    3844             :     : LargeObjectSpace(heap, CODE_LO_SPACE),
    3845      124884 :       chunk_map_(kInitialChunkMapCapacity) {}
    3846             : 
    3847       40187 : AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
    3848       40187 :   return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
    3849             : }
    3850             : 
    3851       40180 : void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
    3852       40180 :   LargeObjectSpace::AddPage(page, object_size);
    3853       40180 :   InsertChunkMapEntries(page);
    3854       40180 : }
    3855             : 
    3856          24 : void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
    3857          24 :   RemoveChunkMapEntries(page);
    3858          24 :   LargeObjectSpace::RemovePage(page, object_size);
    3859          24 : }
    3860             : 
    3861             : }  // namespace internal
    3862      122036 : }  // namespace v8

Generated by: LCOV version 1.10